Coverage for drivers/LVMSR.py: 48%
1 #!/usr/bin/python3
2 #
3 # Copyright (C) Citrix Systems Inc.
4 #
5 # This program is free software; you can redistribute it and/or modify
6 # it under the terms of the GNU Lesser General Public License as published
7 # by the Free Software Foundation; version 2.1 only.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU Lesser General Public License for more details.
13 #
14 # You should have received a copy of the GNU Lesser General Public License
15 # along with this program; if not, write to the Free Software Foundation, Inc.,
16 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 #
18 # LVMSR: VHD and QCOW2 on LVM storage repository
19 #
21 from sm_typing import Dict, List, override
23 import SR
24 from SR import deviceCheck
25 import VDI
26 import SRCommand
27 import util
28 import lvutil
29 import lvmcache
30 import scsiutil
31 import lock
32 import os
33 import sys
34 import time
35 import errno
36 import xs_errors
37 import cleanup
38 import blktap2
39 from journaler import Journaler
40 from refcounter import RefCounter
41 from ipc import IPCFlag
42 from constants import NS_PREFIX_LVM, VG_LOCATION, VG_PREFIX, CBT_BLOCK_SIZE
43 from cowutil import CowUtil, getCowUtil, getImageStringFromVdiType, getVdiTypeFromImageFormat
44 from lvmcowutil import LV_PREFIX, LvmCowUtil
45 from lvmanager import LVActivator
46 from vditype import VdiType
47 import XenAPI # pylint: disable=import-error
48 import re
49 from srmetadata import ALLOCATION_TAG, NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, \
50 UUID_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, TYPE_TAG, VDI_TYPE_TAG, \
51 READ_ONLY_TAG, MANAGED_TAG, SNAPSHOT_TIME_TAG, METADATA_OF_POOL_TAG, \
52 LVMMetadataHandler, METADATA_OBJECT_TYPE_VDI, \
53 METADATA_OBJECT_TYPE_SR, METADATA_UPDATE_OBJECT_TYPE_TAG
54 from metadata import retrieveXMLfromFile, _parseXML
55 from xmlrpc.client import DateTime
56 import glob
57 from constants import CBTLOG_TAG
58 from fairlock import Fairlock
59 DEV_MAPPER_ROOT = os.path.join('/dev/mapper', VG_PREFIX)
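# Device-mapper flattens '/dev/<vg>/<lv>' into a single name under /dev/mapper,
# doubling any '-' that occurs inside the VG or LV name itself. Illustrative
# sketch (assuming VG_PREFIX is 'VG_XenStorage-'): the LV 'MGT' of an SR shows
# up as /dev/mapper/VG_XenStorage--<sr_uuid>-MGT. delete() and detach() below
# undo this escaping with fileName.replace('-', '/').replace('//', '-').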
61 geneology: Dict[str, List[str]] = {}
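# Maps a parent VDI uuid to the uuids of its children; populated by
# _loadvdis() and used there to spot hidden leaf nodes that the GC will
# remove anyway.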
62 CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_TRIM",
63 "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", "VDI_MIRROR",
64 "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE",
65 "VDI_RESET_ON_BOOT/2", "VDI_UPDATE", "VDI_CONFIG_CBT",
66 "VDI_ACTIVATE", "VDI_DEACTIVATE"]
68 CONFIGURATION = [['device', 'local device path (required) (e.g. /dev/sda3)']]
70 DRIVER_INFO = {
71 'name': 'Local VHD and QCOW2 on LVM',
72 'description': 'SR plugin which represents disks as VHD and QCOW2 disks on ' + \
73 'Logical Volumes within a locally-attached Volume Group',
74 'vendor': 'XenSource Inc',
75 'copyright': '(C) 2008 XenSource Inc',
76 'driver_version': '1.0',
77 'required_api_version': '1.0',
78 'capabilities': CAPABILITIES,
79 'configuration': CONFIGURATION
80 }
82 CREATE_PARAM_TYPES = {
83 "raw": VdiType.RAW,
84 "vhd": VdiType.VHD,
85 "qcow2": VdiType.QCOW2
86 }
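# Maps the image format requested at vdi_create time (the 'image-format' or
# legacy 'type' key of vdi_sm_config, see LVMVDI.load) to a VdiType; an
# unrecognized format string fails the create with 'bad image format'.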
88 OPS_EXCLUSIVE = [
89 "sr_create", "sr_delete", "sr_attach", "sr_detach", "sr_scan",
90 "sr_update", "vdi_create", "vdi_delete", "vdi_resize", "vdi_snapshot",
91 "vdi_clone"]
93 # Log if snapshot pauses VM for more than this many seconds
94 LONG_SNAPTIME = 60
96 class LVMSR(SR.SR):
97 DRIVER_TYPE = 'lvhd'
99 PROVISIONING_TYPES = ["thin", "thick"]
100 PROVISIONING_DEFAULT = "thick"
101 THIN_PLUGIN = "lvhd-thin"
103 PLUGIN_ON_SLAVE = "on-slave"
105 FLAG_USE_VHD = "use_vhd"
106 MDVOLUME_NAME = "MGT"
108 ALLOCATION_QUANTUM = "allocation_quantum"
109 INITIAL_ALLOCATION = "initial_allocation"
111 LOCK_RETRY_INTERVAL = 3
112 LOCK_RETRY_ATTEMPTS = 10
114 TEST_MODE_KEY = "testmode"
115 TEST_MODE_VHD_FAIL_REPARENT_BEGIN = "vhd_fail_reparent_begin"
116 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR = "vhd_fail_reparent_locator"
117 TEST_MODE_VHD_FAIL_REPARENT_END = "vhd_fail_reparent_end"
118 TEST_MODE_VHD_FAIL_RESIZE_BEGIN = "vhd_fail_resize_begin"
119 TEST_MODE_VHD_FAIL_RESIZE_DATA = "vhd_fail_resize_data"
120 TEST_MODE_VHD_FAIL_RESIZE_METADATA = "vhd_fail_resize_metadata"
121 TEST_MODE_VHD_FAIL_RESIZE_END = "vhd_fail_resize_end"
123 ENV_VAR_VHD_TEST = {
124 TEST_MODE_VHD_FAIL_REPARENT_BEGIN:
125 "VHD_UTIL_TEST_FAIL_REPARENT_BEGIN",
126 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR:
127 "VHD_UTIL_TEST_FAIL_REPARENT_LOCATOR",
128 TEST_MODE_VHD_FAIL_REPARENT_END:
129 "VHD_UTIL_TEST_FAIL_REPARENT_END",
130 TEST_MODE_VHD_FAIL_RESIZE_BEGIN:
131 "VHD_UTIL_TEST_FAIL_RESIZE_BEGIN",
132 TEST_MODE_VHD_FAIL_RESIZE_DATA:
133 "VHD_UTIL_TEST_FAIL_RESIZE_DATA_MOVED",
134 TEST_MODE_VHD_FAIL_RESIZE_METADATA:
135 "VHD_UTIL_TEST_FAIL_RESIZE_METADATA_MOVED",
136 TEST_MODE_VHD_FAIL_RESIZE_END:
137 "VHD_UTIL_TEST_FAIL_RESIZE_END"
138 }
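# Each recognized testmode value from the SR's other-config is exported by
# _prepareTestMode() as the matching environment variable, presumably
# consumed by vhd-util's fault-injection test hooks.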
139 testMode = ""
141 legacyMode = True
143 @override
144 @staticmethod
145 def handles(type) -> bool:
146 """Returns True if this SR class understands the given dconf string"""
147 # we can pose as LVMSR or EXTSR for compatibility purposes
148 if __name__ == '__main__':
149 name = sys.argv[0]
150 else:
151 name = __name__
152 if name.endswith("LVMSR"):
153 return type == "lvm"
154 elif name.endswith("EXTSR"):
155 return type == "ext"
156 return type == LVMSR.DRIVER_TYPE
158 def __init__(self, srcmd, sr_uuid):
159 SR.SR.__init__(self, srcmd, sr_uuid)
160 self._init_preferred_image_formats()
162 @override
163 def load(self, sr_uuid) -> None:
164 self.ops_exclusive = OPS_EXCLUSIVE
166 self.isMaster = False
167 if 'SRmaster' in self.dconf and self.dconf['SRmaster'] == 'true':
168 self.isMaster = True
170 self.lock = lock.Lock(lock.LOCK_TYPE_SR, self.uuid)
171 self.sr_vditype = SR.DEFAULT_TAP
172 self.uuid = sr_uuid
173 self.vgname = VG_PREFIX + self.uuid
174 self.path = os.path.join(VG_LOCATION, self.vgname)
175 self.mdpath = os.path.join(self.path, self.MDVOLUME_NAME)
176 self.provision = self.PROVISIONING_DEFAULT
178 has_sr_ref = self.srcmd.params.get("sr_ref")
179 if has_sr_ref:
180 self.other_conf = self.session.xenapi.SR.get_other_config(self.sr_ref)
181 else:
182 self.other_conf = None
184 self.lvm_conf = None
185 if self.other_conf:
186 self.lvm_conf = self.other_conf.get('lvm-conf')
188 try:
189 self.lvmCache = lvmcache.LVMCache(self.vgname, self.lvm_conf)
190 except:
191 raise xs_errors.XenError('SRUnavailable', \
192 opterr='Failed to initialise the LVMCache')
193 self.lvActivator = LVActivator(self.uuid, self.lvmCache)
194 self.journaler = Journaler(self.lvmCache)
195 if not has_sr_ref:
196 return # must be a probe call
197 # Test for thick vs thin provisioning conf parameter
198 if 'allocation' in self.dconf: 198 ↛ 199 (the condition on line 198 was never true)
199 if self.dconf['allocation'] in self.PROVISIONING_TYPES:
200 self.provision = self.dconf['allocation']
201 else:
202 raise xs_errors.XenError('InvalidArg', \
203 opterr='Allocation parameter must be one of %s' % self.PROVISIONING_TYPES)
205 if self.other_conf.get(self.TEST_MODE_KEY): 205 ↛ 209 (the condition on line 205 was never false)
206 self.testMode = self.other_conf[self.TEST_MODE_KEY]
207 self._prepareTestMode()
209 self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
210 # sm_config flag overrides PBD, if any
211 if self.sm_config.get('allocation') in self.PROVISIONING_TYPES:
212 self.provision = self.sm_config.get('allocation')
214 if self.sm_config.get(self.FLAG_USE_VHD) == "true":
215 self.legacyMode = False
217 if lvutil._checkVG(self.vgname):
218 if self.isMaster and not self.cmd in ["vdi_attach", "vdi_detach", 218 ↛ 221 (the condition on line 218 was never false)
219 "vdi_activate", "vdi_deactivate"]:
220 self._undoAllJournals()
221 if not self.cmd in ["sr_attach", "sr_probe"]:
222 self._checkMetadataVolume()
224 self.mdexists = False
226 # get a VDI -> TYPE map from the storage
227 contains_uuid_regex = \
228 re.compile("^.*[0-9a-f]{8}-(([0-9a-f]{4})-){3}[0-9a-f]{12}.*")
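# i.e. any LV name with an embedded UUID; the LV_PREFIX loop below then
# recovers the uuid and the VDI type, e.g. (illustrative) 'VHD-<uuid>'.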
229 self.storageVDIs = {}
231 for key in self.lvmCache.lvs.keys(): 231 ↛ 233 (the loop on line 231 never started)
232 # if the lvname has a uuid in it
233 type = None
234 vdi = None
235 if contains_uuid_regex.search(key) is not None:
236 for vdi_type, prefix in LV_PREFIX.items():
237 if key.startswith(prefix):
238 vdi = key[len(prefix):]
239 self.storageVDIs[vdi] = vdi_type
240 break
242 # check if metadata volume exists
243 try:
244 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
245 except:
246 pass
248 @override
249 def cleanup(self) -> None:
250 # we don't need to hold the lock to dec refcounts of activated LVs
251 if not self.lvActivator.deactivateAll(): 251 ↛ 252 (the condition on line 251 was never true)
252 raise util.SMException("failed to deactivate LVs")
254 def updateSRMetadata(self, allocation):
255 try:
256 # Add SR specific SR metadata
257 sr_info = \
258 {ALLOCATION_TAG: allocation,
259 UUID_TAG: self.uuid,
260 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_label(self.sr_ref)),
261 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_description(self.sr_ref))
262 }
264 vdi_info = {}
265 for vdi in self.session.xenapi.SR.get_VDIs(self.sr_ref):
266 vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi)
268 vdi_type = self.session.xenapi.VDI.get_sm_config(vdi).get('vdi_type')
269 if not vdi_type:
270 raise xs_errors.XenError('MetadataError', opterr=f"Missing `vdi_type` for VDI {vdi_uuid}")
272 # Create the VDI entry in the SR metadata
273 vdi_info[vdi_uuid] = \
274 {
275 UUID_TAG: vdi_uuid,
276 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi)),
277 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi)),
278 IS_A_SNAPSHOT_TAG: \
279 int(self.session.xenapi.VDI.get_is_a_snapshot(vdi)),
280 SNAPSHOT_OF_TAG: \
281 self.session.xenapi.VDI.get_snapshot_of(vdi),
282 SNAPSHOT_TIME_TAG: \
283 self.session.xenapi.VDI.get_snapshot_time(vdi),
284 TYPE_TAG: \
285 self.session.xenapi.VDI.get_type(vdi),
286 VDI_TYPE_TAG: \
287 vdi_type,
288 READ_ONLY_TAG: \
289 int(self.session.xenapi.VDI.get_read_only(vdi)),
290 METADATA_OF_POOL_TAG: \
291 self.session.xenapi.VDI.get_metadata_of_pool(vdi),
292 MANAGED_TAG: \
293 int(self.session.xenapi.VDI.get_managed(vdi))
294 }
295 LVMMetadataHandler(self.mdpath).writeMetadata(sr_info, vdi_info)
297 except Exception as e:
298 raise xs_errors.XenError('MetadataError', \
299 opterr='Error upgrading SR Metadata: %s' % str(e))
301 def syncMetadataAndStorage(self):
302 try:
303 # if a VDI is present in the metadata but not in the storage
304 # then delete it from the metadata
305 vdi_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
306 for vdi in list(vdi_info.keys()):
307 update_map = {}
308 if not vdi_info[vdi][UUID_TAG] in set(self.storageVDIs.keys()): 308 ↛ 315 (the condition on line 308 was never false)
309 # delete this from metadata
310 LVMMetadataHandler(self.mdpath). \
311 deleteVdiFromMetadata(vdi_info[vdi][UUID_TAG])
312 else:
313 # search for this in the metadata, compare types
314 # self.storageVDIs is a map of vdi_uuid to vdi_type
315 if vdi_info[vdi][VDI_TYPE_TAG] != \
316 self.storageVDIs[vdi_info[vdi][UUID_TAG]]:
317 # the storage type is authoritative
318 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] \
319 = METADATA_OBJECT_TYPE_VDI
320 update_map[UUID_TAG] = vdi_info[vdi][UUID_TAG]
321 update_map[VDI_TYPE_TAG] = \
322 self.storageVDIs[vdi_info[vdi][UUID_TAG]]
323 LVMMetadataHandler(self.mdpath) \
324 .updateMetadata(update_map)
325 else:
326 # This should never happen
327 pass
329 except Exception as e:
330 raise xs_errors.XenError('MetadataError', \
331 opterr='Error syncing SR Metadata and storage: %s' % str(e))
333 def syncMetadataAndXapi(self):
334 try:
335 # get metadata
336 (sr_info, vdi_info) = \
337 LVMMetadataHandler(self.mdpath, False).getMetadata()
339 # First sync SR parameters
340 self.update(self.uuid)
342 # Now update the VDI information in the metadata if required
343 for vdi_offset in vdi_info.keys():
344 try:
345 vdi_ref = \
346 self.session.xenapi.VDI.get_by_uuid( \
347 vdi_info[vdi_offset][UUID_TAG])
348 except:
349 # maybe the VDI is not in XAPI yet; don't bother
350 continue
352 new_name_label = util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi_ref))
353 new_name_description = util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi_ref))
355 if vdi_info[vdi_offset][NAME_LABEL_TAG] != new_name_label or \
356 vdi_info[vdi_offset][NAME_DESCRIPTION_TAG] != \
357 new_name_description:
358 update_map = {}
359 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
360 METADATA_OBJECT_TYPE_VDI
361 update_map[UUID_TAG] = vdi_info[vdi_offset][UUID_TAG]
362 update_map[NAME_LABEL_TAG] = new_name_label
363 update_map[NAME_DESCRIPTION_TAG] = new_name_description
364 LVMMetadataHandler(self.mdpath) \
365 .updateMetadata(update_map)
366 except Exception as e:
367 raise xs_errors.XenError('MetadataError', \
368 opterr='Error syncing SR Metadata and XAPI: %s' % str(e))
370 def _checkMetadataVolume(self):
371 util.SMlog("Entering _checkMetadataVolume")
372 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
373 if self.isMaster: 373 ↛ 389 (the condition on line 373 was never false)
374 if self.mdexists and self.cmd == "sr_attach":
375 try:
376 # activate the management volume
377 # will be deactivated at detach time
378 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
379 self._synchSmConfigWithMetaData()
380 util.SMlog("Sync SR metadata and the state on the storage.")
381 self.syncMetadataAndStorage()
382 self.syncMetadataAndXapi()
383 except Exception as e:
384 util.SMlog("Exception in _checkMetadataVolume, " \
385 "Error: %s." % str(e))
386 elif not self.mdexists and not self.legacyMode: 386 ↛ 389 (the condition on line 386 was never false)
387 self._introduceMetaDataVolume()
389 if self.mdexists:
390 self.legacyMode = False
392 def _synchSmConfigWithMetaData(self):
393 util.SMlog("Syncing sm-config with metadata volume")
395 try:
396 # get SR info from metadata
397 sr_info = {}
398 map = {}
399 sr_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[0]
401 if sr_info == {}: 401 ↛ 402 (the condition on line 401 was never true)
402 raise Exception("Failed to get SR information from metadata.")
404 if "allocation" in sr_info: 404 ↛ 408 (the condition on line 404 was never false)
405 self.provision = sr_info.get("allocation")
406 map['allocation'] = sr_info.get("allocation")
407 else:
408 raise Exception("Allocation key not found in SR metadata. "
409 "SR info found: %s" % sr_info)
411 except Exception as e:
412 raise xs_errors.XenError(
413 'MetadataError',
414 opterr='Error reading SR params from '
415 'metadata Volume: %s' % str(e))
416 try:
417 map[self.FLAG_USE_VHD] = 'true'
418 self.session.xenapi.SR.set_sm_config(self.sr_ref, map)
419 except:
420 raise xs_errors.XenError(
421 'MetadataError',
422 opterr='Error updating sm_config key')
424 def _introduceMetaDataVolume(self):
425 util.SMlog("Creating Metadata volume")
426 try:
427 config = {}
428 self.lvmCache.create(self.MDVOLUME_NAME, 4 * 1024 * 1024)
430 # activate the management volume, will be deactivated at detach time
431 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
433 name_label = util.to_plain_string( \
434 self.session.xenapi.SR.get_name_label(self.sr_ref))
435 name_description = util.to_plain_string( \
436 self.session.xenapi.SR.get_name_description(self.sr_ref))
437 config[self.FLAG_USE_VHD] = "true"
438 config['allocation'] = self.provision
439 self.session.xenapi.SR.set_sm_config(self.sr_ref, config)
441 # Add the SR metadata
442 self.updateSRMetadata(self.provision)
443 except Exception as e:
444 raise xs_errors.XenError('MetadataError', \
445 opterr='Error introducing Metadata Volume: %s' % str(e))
447 def _removeMetadataVolume(self):
448 if self.mdexists:
449 try:
450 self.lvmCache.remove(self.MDVOLUME_NAME)
451 except:
452 raise xs_errors.XenError('MetadataError', \
453 opterr='Failed to delete MGT Volume')
455 def _refresh_size(self):
456 """
457 Refreshes the size of the backing device.
458 Returns True if all paths/devices agree on the same size.
459 """
460 if hasattr(self, 'SCSIid'): 460 ↛ 462 (the condition on line 460 was never true)
461 # LVMoHBASR, LVMoISCSISR
462 return scsiutil.refresh_lun_size_by_SCSIid(getattr(self, 'SCSIid'))
463 else:
464 # LVMSR
465 devices = self.dconf['device'].split(',')
466 scsiutil.refreshdev(devices)
467 return True
469 def _expand_size(self):
470 """
471 Expands the size of the SR by growing into additional available
472 space, if extra space is available on the backing device.
473 Needs to be called after a successful call to _refresh_size.
474 """
475 currentvgsize = lvutil._getVGstats(self.vgname)['physical_size']
476 # We are comparing PV- with VG-sizes that are aligned. Need a threshold
477 resizethreshold = 100 * 1024 * 1024 # 100MB
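# e.g. a PV that LVM aligned down by a few MiB must not trigger a resize;
# only a backing device that really grew (by more than 100MB) should.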
478 devices = self.dconf['device'].split(',')
479 totaldevicesize = 0
480 for device in devices:
481 totaldevicesize = totaldevicesize + scsiutil.getsize(device)
482 if totaldevicesize >= (currentvgsize + resizethreshold):
483 try:
484 if hasattr(self, 'SCSIid'): 484 ↛ 486 (the condition on line 484 was never true)
485 # LVMoHBASR, LVMoISCSISR might have slaves
486 scsiutil.refresh_lun_size_by_SCSIid_on_slaves(self.session,
487 getattr(self, 'SCSIid'))
488 util.SMlog("LVMSR._expand_size for %s will resize the pv." %
489 self.uuid)
490 for pv in lvutil.get_pv_for_vg(self.vgname):
491 lvutil.resizePV(pv)
492 except:
493 util.logException("LVMSR._expand_size for %s failed to resize"
494 " the PV" % self.uuid)
496 @override
497 @deviceCheck
498 def create(self, uuid, size) -> None:
499 util.SMlog("LVMSR.create for %s" % self.uuid)
500 if not self.isMaster:
501 util.SMlog('sr_create blocked for non-master')
502 raise xs_errors.XenError('LVMMaster')
504 if lvutil._checkVG(self.vgname):
505 raise xs_errors.XenError('SRExists')
507 # Check that none of the devices are already in use by other PBDs
508 if util.test_hostPBD_devs(self.session, uuid, self.dconf['device']):
509 raise xs_errors.XenError('SRInUse')
511 # Check serial number entry in SR records
512 for dev in self.dconf['device'].split(','):
513 if util.test_scsiserial(self.session, dev):
514 raise xs_errors.XenError('SRInUse')
516 lvutil.createVG(self.dconf['device'], self.vgname)
518 # Update serial number string
519 scsiutil.add_serial_record(self.session, self.sr_ref, \
520 scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))
522 # since this is an SR.create, turn off legacy mode
523 self.session.xenapi.SR.add_to_sm_config(self.sr_ref, \
524 self.FLAG_USE_VHD, 'true')
526 @override
527 def delete(self, uuid) -> None:
528 util.SMlog("LVMSR.delete for %s" % self.uuid)
529 if not self.isMaster:
530 raise xs_errors.XenError('LVMMaster')
531 cleanup.gc_force(self.session, self.uuid)
533 success = True
534 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
535 if util.extractSRFromDevMapper(fileName) != self.uuid:
536 continue
538 if util.doesFileHaveOpenHandles(fileName):
539 util.SMlog("LVMSR.delete: The dev mapper entry %s has open " \
540 "handles" % fileName)
541 success = False
542 continue
544 # Now attempt to remove the dev mapper entry
545 if not lvutil.removeDevMapperEntry(fileName, False):
546 success = False
547 continue
549 try:
550 lvname = os.path.basename(fileName.replace('-', '/'). \
551 replace('//', '-'))
552 lpath = os.path.join(self.path, lvname)
553 os.unlink(lpath)
554 except OSError as e:
555 if e.errno != errno.ENOENT:
556 util.SMlog("LVMSR.delete: failed to remove the symlink for " \
557 "file %s. Error: %s" % (fileName, str(e)))
558 success = False
560 if success:
561 try:
562 if util.pathexists(self.path):
563 os.rmdir(self.path)
564 except Exception as e:
565 util.SMlog("LVMSR.delete: failed to remove the symlink " \
566 "directory %s. Error: %s" % (self.path, str(e)))
567 success = False
569 self._removeMetadataVolume()
570 self.lvmCache.refresh()
571 if LvmCowUtil.getVolumeInfo(self.lvmCache):
572 raise xs_errors.XenError('SRNotEmpty')
574 if not success:
575 raise Exception("LVMSR delete failed, please refer to the log " \
576 "for details.")
578 lvutil.removeVG(self.dconf['device'], self.vgname)
579 self._cleanup()
581 @override
582 def attach(self, uuid) -> None:
583 util.SMlog("LVMSR.attach for %s" % self.uuid)
585 self._cleanup(True) # in case of host crashes, if detach wasn't called
587 if not util.match_uuid(self.uuid) or not lvutil._checkVG(self.vgname): 587 ↛ 588 (the condition on line 587 was never true)
588 raise xs_errors.XenError('SRUnavailable', \
589 opterr='no such volume group: %s' % self.vgname)
591 # Refresh the metadata status
592 self._checkMetadataVolume()
594 refreshsizeok = self._refresh_size()
596 if self.isMaster: 596 ↛ 607 (the condition on line 596 was never false)
597 if refreshsizeok: 597 ↛ 601 (the condition on line 597 was never false)
598 self._expand_size()
600 # Update SCSIid string
601 util.SMlog("Calling devlist_to_serial")
602 scsiutil.add_serial_record(
603 self.session, self.sr_ref,
604 scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))
606 # Test Legacy Mode Flag and update if COW volumes exist
607 if self.isMaster and self.legacyMode: 607 ↛ 608 (the condition on line 607 was never true)
608 vdiInfo = LvmCowUtil.getVDIInfo(self.lvmCache)
609 for uuid, info in vdiInfo.items():
610 if VdiType.isCowImage(info.vdiType):
611 self.legacyMode = False
612 map = self.session.xenapi.SR.get_sm_config(self.sr_ref)
613 self._introduceMetaDataVolume()
614 break
616 # Set the block scheduler
617 for dev in self.dconf['device'].split(','):
618 self.block_setscheduler(dev)
620 @override
621 def detach(self, uuid) -> None:
622 util.SMlog("LVMSR.detach for %s" % self.uuid)
623 cleanup.abort(self.uuid)
625 # Do a best-effort cleanup of the dev mapper entries
626 # go through all devmapper entries for this VG
627 success = True
628 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
629 if util.extractSRFromDevMapper(fileName) != self.uuid: 629 ↛ 630 (the condition on line 629 was never true)
630 continue
632 with Fairlock('devicemapper'):
633 # check if any file has open handles
634 if util.doesFileHaveOpenHandles(fileName):
635 # if yes, log this and signal failure
636 util.SMlog(
637 f"LVMSR.detach: The dev mapper entry {fileName} has "
638 "open handles")
639 success = False
640 continue
642 # Now attempt to remove the dev mapper entry
643 if not lvutil.removeDevMapperEntry(fileName, False): 643 ↛ 644 (the condition on line 643 was never true)
644 success = False
645 continue
647 # also remove the symlinks from /dev/VG-XenStorage-SRUUID/*
648 try:
649 lvname = os.path.basename(fileName.replace('-', '/'). \
650 replace('//', '-'))
651 lvname = os.path.join(self.path, lvname)
652 util.force_unlink(lvname)
653 except Exception as e:
654 util.SMlog("LVMSR.detach: failed to remove the symlink for " \
655 "file %s. Error: %s" % (fileName, str(e)))
656 success = False
658 # now remove the directory where the symlinks are
659 # this should pass as the directory should be empty by now
660 if success:
661 try:
662 if util.pathexists(self.path): 662 ↛ 663 (the condition on line 662 was never true)
663 os.rmdir(self.path)
664 except Exception as e:
665 util.SMlog("LVMSR.detach: failed to remove the symlink " \
666 "directory %s. Error: %s" % (self.path, str(e)))
667 success = False
669 if not success:
670 raise Exception("SR detach failed, please refer to the log " \
671 "for details.")
673 # Don't delete lock files on the master as it will break the locking
674 # between SM and any GC thread that survives through SR.detach.
675 # However, we should still delete lock files on slaves as it is the
676 # only place to do so.
677 self._cleanup(self.isMaster)
679 @override
680 def forget_vdi(self, uuid) -> None:
681 if not self.legacyMode:
682 LVMMetadataHandler(self.mdpath).deleteVdiFromMetadata(uuid)
683 super(LVMSR, self).forget_vdi(uuid)
685 @override
686 def scan(self, uuid) -> None:
687 activated_lvs = set()
688 try:
689 util.SMlog("LVMSR.scan for %s" % self.uuid)
690 if not self.isMaster: 690 ↛ 691 (the condition on line 690 was never true)
691 util.SMlog('sr_scan blocked for non-master')
692 raise xs_errors.XenError('LVMMaster')
694 if self._refresh_size(): 694 ↛ 696 (the condition on line 694 was never false)
695 self._expand_size()
696 self.lvmCache.refresh()
697 cbt_vdis = self.lvmCache.getTagged(CBTLOG_TAG)
698 self._loadvdis()
699 stats = lvutil._getVGstats(self.vgname)
700 self.physical_size = stats['physical_size']
701 self.physical_utilisation = stats['physical_utilisation']
703 # Now check if there are any VDIs in the metadata which are not in
704 # XAPI
705 if self.mdexists: 705 ↛ 816 (the condition on line 705 was never false)
706 vdiToSnaps: Dict[str, List[str]] = {}
707 # get VDIs from XAPI
708 vdis = self.session.xenapi.SR.get_VDIs(self.sr_ref)
709 vdi_uuids = set([])
710 for vdi in vdis:
711 vdi_uuids.add(self.session.xenapi.VDI.get_uuid(vdi))
713 info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
715 for vdi in list(info.keys()):
716 vdi_uuid = info[vdi][UUID_TAG]
717 if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])): 717 ↛ 718 (the condition on line 717 was never true)
718 if info[vdi][SNAPSHOT_OF_TAG] in vdiToSnaps:
719 vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]].append(vdi_uuid)
720 else:
721 vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]] = [vdi_uuid]
723 if vdi_uuid not in vdi_uuids:
724 util.SMlog("Introduce VDI %s as it is present in " \
725 "metadata and not in XAPI." % vdi_uuid)
726 vdi_type = info[vdi][VDI_TYPE_TAG]
727 sm_config = {}
728 sm_config['vdi_type'] = vdi_type
729 lvname = "%s%s" % (LV_PREFIX[sm_config['vdi_type']], vdi_uuid)
730 self.lvActivator.activate(
731 vdi_uuid, lvname, LVActivator.NORMAL)
732 activated_lvs.add(vdi_uuid)
733 lvPath = os.path.join(self.path, lvname)
735 if not VdiType.isCowImage(vdi_type): 735 ↛ 736 (the condition on line 735 was never true)
736 size = self.lvmCache.getSize(LV_PREFIX[vdi_type] + vdi_uuid)
737 utilisation = \
738 util.roundup(lvutil.LVM_SIZE_INCREMENT,
739 int(size))
740 else:
741 cowutil = getCowUtil(vdi_type)
742 lvmcowutil = LvmCowUtil(cowutil)
744 parent = cowutil.getParentNoCheck(lvPath)
746 if parent is not None: 746 ↛ 747 (the condition on line 746 was never true)
747 sm_config['vhd-parent'] = parent[parent.find('-') + 1:]
748 size = cowutil.getSizeVirt(lvPath)
749 if self.provision == "thin": 749 ↛ 750 (the condition on line 749 was never true)
750 utilisation = util.roundup(
751 lvutil.LVM_SIZE_INCREMENT,
752 cowutil.calcOverheadEmpty(max(size, cowutil.getDefaultPreallocationSizeVirt()))
753 )
754 else:
755 utilisation = lvmcowutil.calcVolumeSize(int(size))
757 vdi_ref = self.session.xenapi.VDI.db_introduce(
758 vdi_uuid,
759 info[vdi][NAME_LABEL_TAG],
760 info[vdi][NAME_DESCRIPTION_TAG],
761 self.sr_ref,
762 info[vdi][TYPE_TAG],
763 False,
764 bool(int(info[vdi][READ_ONLY_TAG])),
765 {},
766 vdi_uuid,
767 {},
768 sm_config)
770 self.session.xenapi.VDI.set_managed(vdi_ref,
771 bool(int(info[vdi][MANAGED_TAG])))
772 self.session.xenapi.VDI.set_virtual_size(vdi_ref,
773 str(size))
774 self.session.xenapi.VDI.set_physical_utilisation( \
775 vdi_ref, str(utilisation))
776 self.session.xenapi.VDI.set_is_a_snapshot( \
777 vdi_ref, bool(int(info[vdi][IS_A_SNAPSHOT_TAG])))
778 if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])): 778 ↛ 779 (the condition on line 778 was never true)
779 self.session.xenapi.VDI.set_snapshot_time( \
780 vdi_ref, DateTime(info[vdi][SNAPSHOT_TIME_TAG]))
781 if info[vdi][TYPE_TAG] == 'metadata': 781 ↛ 782 (the condition on line 781 was never true)
782 self.session.xenapi.VDI.set_metadata_of_pool( \
783 vdi_ref, info[vdi][METADATA_OF_POOL_TAG])
785 # Update CBT status of disks either just added
786 # or already in XAPI
787 cbt_logname = "%s.%s" % (vdi_uuid, CBTLOG_TAG)
788 if cbt_logname in cbt_vdis: 788 ↛ 789 (the condition on line 788 was never true)
789 vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
790 self.session.xenapi.VDI.set_cbt_enabled(vdi_ref, True)
791 # For existing VDIs, update local state too
792 # Scan in base class SR updates existing VDIs
793 # again based on local states
794 if vdi_uuid in self.vdis:
795 self.vdis[vdi_uuid].cbt_enabled = True
796 cbt_vdis.remove(cbt_logname)
798 # Now set the snapshot statuses correctly in XAPI
799 for srcvdi in vdiToSnaps.keys(): 799 ↛ 800 (the loop on line 799 never started)
800 try:
801 srcref = self.session.xenapi.VDI.get_by_uuid(srcvdi)
802 except:
803 # the source VDI no longer exists, continue
804 continue
806 for snapvdi in vdiToSnaps[srcvdi]:
807 try:
808 # this might fail in cases where it's already set
809 snapref = \
810 self.session.xenapi.VDI.get_by_uuid(snapvdi)
811 self.session.xenapi.VDI.set_snapshot_of(snapref, srcref)
812 except Exception as e:
813 util.SMlog("Setting snapshot failed. " \
814 "Error: %s" % str(e))
816 if cbt_vdis: 816 ↛ 827 (the condition on line 816 was never false)
817 # If we have items remaining in this list,
818 # they are cbt_metadata VDIs that XAPI doesn't know about
819 # Add them to self.vdis and they'll get added to the DB
820 for cbt_vdi in cbt_vdis: 820 ↛ 821 (the loop on line 820 never started)
821 cbt_uuid = cbt_vdi.split(".")[0]
822 new_vdi = self.vdi(cbt_uuid)
823 new_vdi.ty = "cbt_metadata"
824 new_vdi.cbt_enabled = True
825 self.vdis[cbt_uuid] = new_vdi
827 super(LVMSR, self).scan(uuid)
828 self._kickGC()
830 finally:
831 for vdi in activated_lvs:
832 self.lvActivator.deactivate(
833 vdi, LVActivator.NORMAL, False)
835 @override
836 def update(self, uuid) -> None:
837 if not lvutil._checkVG(self.vgname): 837 ↛ 838 (the condition on line 837 was never true)
838 return
839 self._updateStats(uuid, 0)
841 if self.legacyMode: 841 ↛ 842 (the condition on line 841 was never true)
842 return
844 # sync name_label in metadata with XAPI
845 update_map = {}
846 update_map = {METADATA_UPDATE_OBJECT_TYPE_TAG: \
847 METADATA_OBJECT_TYPE_SR,
848 NAME_LABEL_TAG: util.to_plain_string( \
849 self.session.xenapi.SR.get_name_label(self.sr_ref)),
850 NAME_DESCRIPTION_TAG: util.to_plain_string( \
851 self.session.xenapi.SR.get_name_description(self.sr_ref))
852 }
853 LVMMetadataHandler(self.mdpath).updateMetadata(update_map)
855 def _updateStats(self, uuid, virtAllocDelta):
856 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref))
857 self.virtual_allocation = valloc + virtAllocDelta
858 util.SMlog("Setting virtual_allocation of SR %s to %d" %
859 (uuid, self.virtual_allocation))
860 stats = lvutil._getVGstats(self.vgname)
861 self.physical_size = stats['physical_size']
862 self.physical_utilisation = stats['physical_utilisation']
863 self._db_update()
865 @override
866 @deviceCheck
867 def probe(self) -> str:
868 return lvutil.srlist_toxml(
869 lvutil.scan_srlist(VG_PREFIX, self.dconf['device']),
870 VG_PREFIX,
871 ('metadata' in self.srcmd.params['sr_sm_config'] and \
872 self.srcmd.params['sr_sm_config']['metadata'] == 'true'))
874 @override
875 def vdi(self, uuid) -> VDI.VDI:
876 return LVMVDI(self, uuid)
878 def _loadvdis(self):
879 self.virtual_allocation = 0
880 self.vdiInfo = LvmCowUtil.getVDIInfo(self.lvmCache)
881 self.allVDIs = {}
883 for uuid, info in self.vdiInfo.items():
884 if uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX): 884 ↛ 885 (the condition on line 884 was never true)
885 continue
886 if info.scanError: 886 ↛ 887 (the condition on line 886 was never true)
887 raise xs_errors.XenError('VDIUnavailable', \
888 opterr='Error scanning VDI %s' % uuid)
889 self.vdis[uuid] = self.allVDIs[uuid] = self.vdi(uuid)
890 if not self.vdis[uuid].hidden: 890 ↛ 883 (the condition on line 890 was never false)
891 self.virtual_allocation += self.vdis[uuid].utilisation
893 for uuid, vdi in self.vdis.items():
894 if vdi.parent: 894 ↛ 895 (the condition on line 894 was never true)
895 if vdi.parent in self.vdis:
896 self.vdis[vdi.parent].read_only = True
897 if vdi.parent in geneology:
898 geneology[vdi.parent].append(uuid)
899 else:
900 geneology[vdi.parent] = [uuid]
902 # Now remove all hidden leaf nodes to avoid introducing records that
903 # will be GC'ed
904 for uuid in list(self.vdis.keys()):
905 if uuid not in geneology and self.vdis[uuid].hidden: 905 ↛ 906 (the condition on line 905 was never true)
906 util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid)
907 del self.vdis[uuid]
909 def _ensureSpaceAvailable(self, amount_needed):
910 space_available = lvutil._getVGstats(self.vgname)['freespace']
911 if (space_available < amount_needed):
912 util.SMlog("Not enough space! free space: %d, need: %d" % \
913 (space_available, amount_needed))
914 raise xs_errors.XenError('SRNoSpace')
916 def _handleInterruptedCloneOps(self):
917 entries = self.journaler.getAll(LVMVDI.JRN_CLONE)
918 for uuid, val in entries.items(): 918 ↛ 919 (the loop on line 918 never started)
919 util.fistpoint.activate("LVHDRT_clone_vdi_before_undo_clone", self.uuid)
920 self._handleInterruptedCloneOp(uuid, val)
921 util.fistpoint.activate("LVHDRT_clone_vdi_after_undo_clone", self.uuid)
922 self.journaler.remove(LVMVDI.JRN_CLONE, uuid)
924 def _handleInterruptedCoalesceLeaf(self):
925 entries = self.journaler.getAll(cleanup.VDI.JRN_LEAF)
926 if len(entries) > 0: 926 ↛ 927 (the condition on line 926 was never true)
927 util.SMlog("*** INTERRUPTED COALESCE-LEAF OP DETECTED ***")
928 cleanup.gc_force(self.session, self.uuid)
929 self.lvmCache.refresh()
931 def _handleInterruptedCloneOp(self, origUuid, jval, forceUndo=False):
932 """Either roll back or finalize the interrupted snapshot/clone
933 operation. Rolling back is unsafe if the leaf images have already been
934 in use and written to. However, it is always safe to roll back while
935 we're still in the context of the failed snapshot operation since the
936 VBD is paused for the duration of the operation"""
937 util.SMlog("*** INTERRUPTED CLONE OP: for %s (%s)" % (origUuid, jval))
938 lvs = LvmCowUtil.getVolumeInfo(self.lvmCache)
939 baseUuid, clonUuid = jval.split("_")
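# the journal value encodes '<baseUuid>_<clonUuid>'; clonUuid may be empty
# for a snapshot that produced no clone, hence the 'clonUuid and ...' guards
# below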
941 # is there a "base copy" VDI?
942 if not lvs.get(baseUuid):
943 # no base copy: make sure the original is there
944 if lvs.get(origUuid):
945 util.SMlog("*** INTERRUPTED CLONE OP: nothing to do")
946 return
947 raise util.SMException("base copy %s not present, " \
948 "but no original %s found" % (baseUuid, origUuid))
950 vdis = LvmCowUtil.getVDIInfo(self.lvmCache)
951 base = vdis[baseUuid]
952 cowutil = getCowUtil(base.vdiType)
954 if forceUndo:
955 util.SMlog("Explicit revert")
956 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
957 return
959 if not lvs.get(origUuid) or (clonUuid and not lvs.get(clonUuid)):
960 util.SMlog("One or both leaves missing => revert")
961 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
962 return
964 if vdis[origUuid].scanError or (clonUuid and vdis[clonUuid].scanError):
965 util.SMlog("One or both leaves invalid => revert")
966 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
967 return
969 orig = vdis[origUuid]
970 self.lvActivator.activate(baseUuid, base.lvName, False)
971 self.lvActivator.activate(origUuid, orig.lvName, False)
972 if orig.parentUuid != baseUuid:
973 parent = vdis[orig.parentUuid]
974 self.lvActivator.activate(parent.uuid, parent.lvName, False)
975 origPath = os.path.join(self.path, orig.lvName)
977 if cowutil.check(origPath) != CowUtil.CheckResult.Success:
978 util.SMlog("Orig image invalid => revert")
979 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
980 return
982 if clonUuid:
983 clon = vdis[clonUuid]
984 clonPath = os.path.join(self.path, clon.lvName)
985 self.lvActivator.activate(clonUuid, clon.lvName, False)
986 if cowutil.check(clonPath) != CowUtil.CheckResult.Success:
987 util.SMlog("Clon image invalid => revert")
988 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
989 return
991 util.SMlog("Snapshot appears valid, will not roll back")
992 self._completeCloneOp(cowutil, vdis, origUuid, baseUuid, clonUuid)
994 def _undoCloneOp(self, cowutil, lvs, origUuid, baseUuid, clonUuid):
995 base = lvs[baseUuid]
996 basePath = os.path.join(self.path, base.name)
998 # make the parent RW
999 if base.readonly:
1000 self.lvmCache.setReadonly(base.name, False)
1002 ns = NS_PREFIX_LVM + self.uuid
1003 origRefcountBinary = RefCounter.check(origUuid, ns)[1]
1004 origRefcountNormal = 0
1006 # un-hide the parent
1007 if VdiType.isCowImage(base.vdiType):
1008 self.lvActivator.activate(baseUuid, base.name, False)
1009 origRefcountNormal = 1
1010 imageInfo = cowutil.getInfo(basePath, LvmCowUtil.extractUuid, False)
1011 if imageInfo.hidden:
1012 cowutil.setHidden(basePath, False)
1013 elif base.hidden:
1014 self.lvmCache.setHidden(base.name, False)
1016 # remove the child nodes
1017 if clonUuid and lvs.get(clonUuid):
1018 if not VdiType.isCowImage(lvs[clonUuid].vdiType):
1019 raise util.SMException("clone %s not a COW image" % clonUuid)
1020 self.lvmCache.remove(lvs[clonUuid].name)
1021 if self.lvActivator.get(clonUuid, False):
1022 self.lvActivator.remove(clonUuid, False)
1023 if lvs.get(origUuid):
1024 self.lvmCache.remove(lvs[origUuid].name)
1026 # inflate the parent to fully-allocated size
1027 if VdiType.isCowImage(base.vdiType):
1028 lvmcowutil = LvmCowUtil(cowutil)
1029 fullSize = lvmcowutil.calcVolumeSize(imageInfo.sizeVirt)
1030 lvmcowutil.inflate(self.journaler, self.uuid, baseUuid, base.vdiType, fullSize)
1032 # rename back
1033 origLV = LV_PREFIX[base.vdiType] + origUuid
1034 self.lvmCache.rename(base.name, origLV)
1035 RefCounter.reset(baseUuid, ns)
1036 if self.lvActivator.get(baseUuid, False):
1037 self.lvActivator.replace(baseUuid, origUuid, origLV, False)
1038 RefCounter.set(origUuid, origRefcountNormal, origRefcountBinary, ns)
1040 # At this stage, tapdisk and the SM VDI will be in the paused state. Remove
1041 # the flag to facilitate VM deactivation
1042 origVdiRef = self.session.xenapi.VDI.get_by_uuid(origUuid)
1043 self.session.xenapi.VDI.remove_from_sm_config(origVdiRef, 'paused')
1045 # update LVM metadata on slaves
1046 slaves = util.get_slaves_attached_on(self.session, [origUuid])
1047 LvmCowUtil.refreshVolumeOnSlaves(self.session, self.uuid, self.vgname,
1048 origLV, origUuid, slaves)
1050 util.SMlog("*** INTERRUPTED CLONE OP: rollback success")
1052 def _completeCloneOp(self, cowutil, vdis, origUuid, baseUuid, clonUuid):
1053 """Finalize the interrupted snapshot/clone operation. This must not be
1054 called from the live snapshot op context because we attempt to pause/
1055 unpause the VBD here (the VBD is already paused during snapshot, so it
1056 would cause a deadlock)"""
1057 base = vdis[baseUuid]
1058 clon = None
1059 if clonUuid:
1060 clon = vdis[clonUuid]
1062 cleanup.abort(self.uuid)
1064 # make sure the parent is hidden and read-only
1065 if not base.hidden:
1066 if not VdiType.isCowImage(base.vdiType):
1067 self.lvmCache.setHidden(base.lvName)
1068 else:
1069 basePath = os.path.join(self.path, base.lvName)
1070 cowutil.setHidden(basePath)
1071 if not base.lvReadonly:
1072 self.lvmCache.setReadonly(base.lvName, True)
1074 # NB: since this snapshot-preserving call is only invoked outside the
1075 # snapshot op context, we assume the LVM metadata on the involved slave
1076 # has by now been refreshed and do not attempt to do it here
1078 # Update the original record
1079 try:
1080 vdi_ref = self.session.xenapi.VDI.get_by_uuid(origUuid)
1081 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
1082 type = self.session.xenapi.VDI.get_type(vdi_ref)
1083 sm_config["vdi_type"] = vdis[origUuid].vdiType
1084 sm_config['vhd-parent'] = baseUuid
1085 self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config)
1086 except XenAPI.Failure:
1087 util.SMlog("ERROR updating the orig record")
1089 # introduce the new VDI records
1090 if clonUuid:
1091 try:
1092 clon_vdi = VDI.VDI(self, clonUuid)
1093 clon_vdi.read_only = False
1094 clon_vdi.location = clonUuid
1095 clon_vdi.utilisation = clon.sizeLV
1096 clon_vdi.sm_config = {
1097 "vdi_type": clon.vdiType,
1098 "vhd-parent": baseUuid}
1100 if not self.legacyMode:
1101 LVMMetadataHandler(self.mdpath). \
1102 ensureSpaceIsAvailableForVdis(1)
1104 clon_vdi_ref = clon_vdi._db_introduce()
1105 util.SMlog("introduced clon VDI: %s (%s)" % \
1106 (clon_vdi_ref, clonUuid))
1108 vdi_info = {UUID_TAG: clonUuid,
1109 NAME_LABEL_TAG: clon_vdi.label,
1110 NAME_DESCRIPTION_TAG: clon_vdi.description,
1111 IS_A_SNAPSHOT_TAG: 0,
1112 SNAPSHOT_OF_TAG: '',
1113 SNAPSHOT_TIME_TAG: '',
1114 TYPE_TAG: type,
1115 VDI_TYPE_TAG: clon_vdi.sm_config['vdi_type'],
1116 READ_ONLY_TAG: int(clon_vdi.read_only),
1117 MANAGED_TAG: int(clon_vdi.managed),
1118 METADATA_OF_POOL_TAG: ''
1119 }
1121 if not self.legacyMode:
1122 LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
1124 except XenAPI.Failure:
1125 util.SMlog("ERROR introducing the clon record")
1127 try:
1128 base_vdi = VDI.VDI(self, baseUuid) # readonly parent
1129 base_vdi.label = "base copy"
1130 base_vdi.read_only = True
1131 base_vdi.location = baseUuid
1132 base_vdi.size = base.sizeVirt
1133 base_vdi.utilisation = base.sizeLV
1134 base_vdi.managed = False
1135 base_vdi.sm_config = {
1136 "vdi_type": base.vdiType,
1137 "vhd-parent": baseUuid}
1139 if not self.legacyMode:
1140 LVMMetadataHandler(self.mdpath).ensureSpaceIsAvailableForVdis(1)
1142 base_vdi_ref = base_vdi._db_introduce()
1143 util.SMlog("introduced base VDI: %s (%s)" % \
1144 (base_vdi_ref, baseUuid))
1146 vdi_info = {UUID_TAG: baseUuid,
1147 NAME_LABEL_TAG: base_vdi.label,
1148 NAME_DESCRIPTION_TAG: base_vdi.description,
1149 IS_A_SNAPSHOT_TAG: 0,
1150 SNAPSHOT_OF_TAG: '',
1151 SNAPSHOT_TIME_TAG: '',
1152 TYPE_TAG: type,
1153 VDI_TYPE_TAG: base_vdi.sm_config['vdi_type'],
1154 READ_ONLY_TAG: int(base_vdi.read_only),
1155 MANAGED_TAG: int(base_vdi.managed),
1156 METADATA_OF_POOL_TAG: ''
1157 }
1159 if not self.legacyMode:
1160 LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
1161 except XenAPI.Failure:
1162 util.SMlog("ERROR introducing the base record")
1164 util.SMlog("*** INTERRUPTED CLONE OP: complete")
1166 def _undoAllJournals(self):
1167 """Undo all COW image & SM interrupted journaled operations. This call must
1168 be serialized with respect to all operations that create journals"""
1169 # undoing interrupted inflates must be done first, since undoing COW image
1170 # ops might require inflations
1171 self.lock.acquire()
1172 try:
1173 self._undoAllInflateJournals()
1174 self._undoAllCowJournals()
1175 self._handleInterruptedCloneOps()
1176 self._handleInterruptedCoalesceLeaf()
1177 finally:
1178 self.lock.release()
1179 self.cleanup()
1181 def _undoAllInflateJournals(self):
1182 entries = self.journaler.getAll(LvmCowUtil.JOURNAL_INFLATE)
1183 if len(entries) == 0:
1184 return
1185 self._loadvdis()
1186 for uuid, val in entries.items():
1187 vdi = self.vdis.get(uuid)
1188 if vdi: 1188 ↛ 1208 (the condition on line 1188 was never false)
1189 util.SMlog("Found inflate journal %s, deflating %s to %s" % \
1190 (uuid, vdi.path, val))
1191 if vdi.readonly: 1191 ↛ 1192 (the condition on line 1191 was never true)
1192 self.lvmCache.setReadonly(vdi.lvname, False)
1193 self.lvActivator.activate(uuid, vdi.lvname, False)
1194 currSizeLV = self.lvmCache.getSize(vdi.lvname)
1196 cowutil = getCowUtil(vdi.vdi_type)
1197 lvmcowutil = LvmCowUtil(cowutil)
1199 footer_size = cowutil.getFooterSize()
1200 util.zeroOut(vdi.path, currSizeLV - footer_size, footer_size)
1201 lvmcowutil.deflate(self.lvmCache, vdi.lvname, int(val))
1202 if vdi.readonly: 1202 ↛ 1203 (the condition on line 1202 was never true)
1203 self.lvmCache.setReadonly(vdi.lvname, True)
1204 if "true" == self.session.xenapi.SR.get_shared(self.sr_ref): 1204 ↛ 1205 (the condition on line 1204 was never true)
1205 LvmCowUtil.refreshVolumeOnAllSlaves(
1206 self.session, self.uuid, self.vgname, vdi.lvname, uuid
1207 )
1208 self.journaler.remove(LvmCowUtil.JOURNAL_INFLATE, uuid)
1209 delattr(self, "vdiInfo")
1210 delattr(self, "allVDIs")
1212 def _undoAllCowJournals(self):
1213 """
1214 Check if there are COW journals in existence and revert them.
1215 """
1216 journals = LvmCowUtil.getAllResizeJournals(self.lvmCache)
1217 if len(journals) == 0: 1217 ↛ 1219 (the condition on line 1217 was never false)
1218 return
1219 self._loadvdis()
1221 for uuid, jlvName in journals:
1222 vdi = self.vdis[uuid]
1223 util.SMlog("Found COW journal %s, reverting %s" % (uuid, vdi.path))
1224 cowutil = getCowUtil(vdi.vdi_type)
1225 lvmcowutil = LvmCowUtil(cowutil)
1227 self.lvActivator.activate(uuid, vdi.lvname, False)
1228 self.lvmCache.activateNoRefcount(jlvName)
1229 fullSize = lvmcowutil.calcVolumeSize(vdi.size)
1230 lvmcowutil.inflate(self.journaler, self.uuid, vdi.uuid, vdi.vdi_type, fullSize)
1231 try:
1232 jFile = os.path.join(self.path, jlvName)
1233 cowutil.revert(vdi.path, jFile)
1234 except util.CommandException:
1235 util.logException("COW journal revert")
1236 cowutil.check(vdi.path)
1237 util.SMlog("COW image revert failed but COW image ok: removing journal")
1238 # Attempt to reclaim unused space
1241 imageInfo = cowutil.getInfo(vdi.path, LvmCowUtil.extractUuid, False)
1242 NewSize = lvmcowutil.calcVolumeSize(imageInfo.sizeVirt)
1243 if NewSize < fullSize:
1244 lvmcowutil.deflate(self.lvmCache, vdi.lvname, int(NewSize))
1245 LvmCowUtil.refreshVolumeOnAllSlaves(self.session, self.uuid, self.vgname, vdi.lvname, uuid)
1246 self.lvmCache.remove(jlvName)
1247 delattr(self, "vdiInfo")
1248 delattr(self, "allVDIs")
1250 def call_on_slave(self, args, host_refs, message: str):
1251 master_ref = util.get_this_host_ref(self.session)
1252 for hostRef in host_refs:
1253 if hostRef == master_ref: 1253 ↛ 1254 (the condition on line 1253 was never true)
1254 continue
1255 util.SMlog(f"{message} on slave {hostRef}")
1256 rv = self.session.xenapi.host.call_plugin(
1257 hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1258 util.SMlog("call-plugin returned: %s" % rv)
1259 if not rv: 1259 ↛ 1260 (the condition on line 1259 was never true)
1260 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1262 def _updateSlavesOnClone(self, hostRefs, origOldLV, origLV,
1263 baseUuid, baseLV):
1264 """We need to reactivate the original LV on each slave (note that the
1265 name for the original LV might change), as well as init the refcount
1266 for the base LV"""
1267 args = {"vgName": self.vgname,
1268 "action1": "refresh",
1269 "lvName1": origLV,
1270 "action2": "activate",
1271 "ns2": NS_PREFIX_LVM + self.uuid,
1272 "lvName2": baseLV,
1273 "uuid2": baseUuid}
1275 message = f"Updating {origOldLV}, {origLV}, {baseLV}"
1276 self.call_on_slave(args, hostRefs, message)
1278 def _updateSlavesOnCBTClone(self, hostRefs, cbtlog):
1279 """Reactivate and refresh CBT log file on slaves"""
1280 args = {"vgName": self.vgname,
1281 "action1": "deactivateNoRefcount",
1282 "lvName1": cbtlog,
1283 "action2": "refresh",
1284 "lvName2": cbtlog}
1286 message = f"Updating {cbtlog}"
1287 self.call_on_slave(args, hostRefs, message)
1289 def _updateSlavesOnRemove(self, hostRefs, baseUuid, baseLV):
1290 """Tell the slave we deleted the base image"""
1291 args = {"vgName": self.vgname,
1292 "action1": "cleanupLockAndRefcount",
1293 "uuid1": baseUuid,
1294 "ns1": NS_PREFIX_LVM + self.uuid}
1296 message = f"Cleaning locks for {baseLV}"
1297 self.call_on_slave(args, hostRefs, message)
1299 def _deactivateOnSlave(self, hostRefs, lvname):
1300 """Tell the slave we need to deactivate the base image"""
1301 args = {
1302 "vgName": self.vgname,
1303 "action1": "deactivateNoRefcount",
1304 "lvName1": lvname}
1306 message = f"Deactivating {lvname}"
1307 self.call_on_slave(args, hostRefs, message)
1309 def _cleanup(self, skipLockCleanup=False):
1310 """delete stale refcounter, flag, and lock files"""
1311 RefCounter.resetAll(NS_PREFIX_LVM + self.uuid)
1312 IPCFlag(self.uuid).clearAll()
1313 if not skipLockCleanup: 1313 ↛ 1314 (the condition on line 1313 was never true)
1314 lock.Lock.cleanupAll(self.uuid)
1315 lock.Lock.cleanupAll(NS_PREFIX_LVM + self.uuid)
1317 def _prepareTestMode(self):
1318 util.SMlog("Test mode: %s" % self.testMode)
1319 if self.ENV_VAR_VHD_TEST.get(self.testMode): 1319 ↛ 1320 (the condition on line 1319 was never true)
1320 os.environ[self.ENV_VAR_VHD_TEST[self.testMode]] = "yes"
1321 util.SMlog("Setting env %s" % self.ENV_VAR_VHD_TEST[self.testMode])
1323 def _kickGC(self):
1324 util.SMlog("Kicking GC")
1325 cleanup.start_gc_service(self.uuid)
1327 def ensureCBTSpace(self, virtual_size=0):
1328 # Ensure we have space for at least one LV
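# i.e. virtual_size // CBT_BLOCK_SIZE rounded up to a multiple of
# CBT_BLOCK_SIZE, floored at the journal LV size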
1329 size = max(util.roundup(CBT_BLOCK_SIZE, virtual_size//CBT_BLOCK_SIZE), self.journaler.LV_SIZE)
1330 self._ensureSpaceAvailable(size)
1333 class LVMVDI(VDI.VDI):
1335 JRN_CLONE = "clone" # journal entry type for the clone operation
1337 @override
1338 def load(self, vdi_uuid) -> None:
1339 self.lock = self.sr.lock
1340 self.lvActivator = self.sr.lvActivator
1341 self.loaded = False
1342 if self.sr.legacyMode or util.fistpoint.is_active("xenrt_default_vdi_type_legacy"): 1342 ↛ 1344 (the condition on line 1342 was never false)
1343 self._setType(VdiType.RAW)
1344 self.uuid = vdi_uuid
1345 self.location = self.uuid
1346 self.exists = True
1348 if hasattr(self.sr, "vdiInfo") and self.sr.vdiInfo.get(self.uuid):
1349 self._initFromVDIInfo(self.sr.vdiInfo[self.uuid])
1350 if self.parent: 1350 ↛ 1351 (the condition on line 1350 was never true)
1351 self.sm_config_override['vhd-parent'] = self.parent
1352 else:
1353 self.sm_config_override['vhd-parent'] = None
1354 return
1356 # scan() didn't run: determine the type of the VDI manually
1357 if self._determineType(): 1357 ↛ 1361 (the condition on line 1357 was never false)
1358 return
1360 # the VDI must be in the process of being created
1361 self.exists = False
1363 vdi_sm_config = self.sr.srcmd.params.get("vdi_sm_config")
1364 if vdi_sm_config:
1365 image_format = vdi_sm_config.get("image-format") or vdi_sm_config.get("type")
1366 if image_format:
1367 try:
1368 self._setType(CREATE_PARAM_TYPES[image_format])
1369 except:
1370 raise xs_errors.XenError('VDICreate', opterr='bad image format')
1371 if self.sr.legacyMode and self.sr.cmd == 'vdi_create' and VdiType.isCowImage(self.vdi_type):
1372 raise xs_errors.XenError('VDICreate', opterr='Cannot create COW type disk in legacy mode')
1374 if not self.vdi_type:
1375 self._setType(getVdiTypeFromImageFormat(self.sr.preferred_image_formats[0]))
1377 self.lvname = "%s%s" % (LV_PREFIX[self.vdi_type], vdi_uuid)
1378 self.path = os.path.join(self.sr.path, self.lvname)
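# e.g. (illustrative, assuming VG_LOCATION is '/dev') a VHD-format VDI lives
# at /dev/VG_XenStorage-<sr_uuid>/VHD-<vdi_uuid>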
1380 @override
1381 def create(self, sr_uuid, vdi_uuid, size) -> str:
1382 util.SMlog("LVMVDI.create for %s" % self.uuid)
1383 if not self.sr.isMaster:
1384 raise xs_errors.XenError('LVMMaster')
1385 if self.exists:
1386 raise xs_errors.XenError('VDIExists')
1388 size = self.cowutil.validateAndRoundImageSize(int(size))
1390 util.SMlog("LVMVDI.create: type = %s, %s (size=%s)" % \
1391 (self.vdi_type, self.path, size))
1392 lvSize = 0
1393 self.sm_config = self.sr.srcmd.params["vdi_sm_config"]
1394 if not VdiType.isCowImage(self.vdi_type):
1395 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, int(size))
1396 else:
1397 if self.sr.provision == "thin":
1398 lvSize = util.roundup(
1399 lvutil.LVM_SIZE_INCREMENT,
1400 self.cowutil.calcOverheadEmpty(max(size, self.cowutil.getDefaultPreallocationSizeVirt()))
1401 )
1402 elif self.sr.provision == "thick":
1403 lvSize = self.lvmcowutil.calcVolumeSize(int(size))
1405 self.sr._ensureSpaceAvailable(lvSize)
1407 try:
1408 self.sr.lvmCache.create(self.lvname, lvSize)
1409 if not VdiType.isCowImage(self.vdi_type):
1410 self.size = self.sr.lvmCache.getSize(self.lvname)
1411 else:
1412 self.cowutil.create(
1413 self.path, int(size), False, self.cowutil.getDefaultPreallocationSizeVirt()
1414 )
1415 self.size = self.cowutil.getSizeVirt(self.path)
1416 self.sr.lvmCache.deactivateNoRefcount(self.lvname)
1417 except util.CommandException as e:
1418 util.SMlog("Unable to create VDI")
1419 self.sr.lvmCache.remove(self.lvname)
1420 raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code)
1422 self.utilisation = lvSize
1423 self.sm_config["vdi_type"] = self.vdi_type
1424 self.sm_config["image-format"] = getImageStringFromVdiType(self.vdi_type)
1426 if not self.sr.legacyMode:
1427 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1429 self.ref = self._db_introduce()
1430 self.sr._updateStats(self.sr.uuid, self.size)
1432 vdi_info = {UUID_TAG: self.uuid,
1433 NAME_LABEL_TAG: util.to_plain_string(self.label),
1434 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description),
1435 IS_A_SNAPSHOT_TAG: 0,
1436 SNAPSHOT_OF_TAG: '',
1437 SNAPSHOT_TIME_TAG: '',
1438 TYPE_TAG: self.ty,
1439 VDI_TYPE_TAG: self.vdi_type,
1440 READ_ONLY_TAG: int(self.read_only),
1441 MANAGED_TAG: int(self.managed),
1442 METADATA_OF_POOL_TAG: ''
1443 }
1445 if not self.sr.legacyMode:
1446 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1448 return VDI.VDI.get_params(self)
1450 @override
1451 def delete(self, sr_uuid, vdi_uuid, data_only=False) -> None:
1452 util.SMlog("LVMVDI.delete for %s" % self.uuid)
1453 try:
1454 self._loadThis()
1455 except xs_errors.SRException as e:
1456 # Catch 'VDI doesn't exist' exception
1457 if e.errno == 46:
1458 return super(LVMVDI, self).delete(sr_uuid, vdi_uuid, data_only)
1459 raise
1461 vdi_ref = self.sr.srcmd.params['vdi_ref']
1462 if not self.session.xenapi.VDI.get_managed(vdi_ref):
1463 raise xs_errors.XenError("VDIDelete", \
1464 opterr="Deleting non-leaf node not permitted")
1466 if not self.hidden:
1467 self._markHidden()
1469 if not data_only:
1470 # Remove from XAPI and delete from MGT
1471 self._db_forget()
1472 else:
1473 # If this is a data_destroy call, don't remove from XAPI db
1474 # Only delete from MGT
1475 if not self.sr.legacyMode:
1476 LVMMetadataHandler(self.sr.mdpath).deleteVdiFromMetadata(self.uuid)
1478 # deactivate here because it might be too late to do it in the "final"
1479 # step: GC might have removed the LV by then
1480 if self.sr.lvActivator.get(self.uuid, False):
1481 self.sr.lvActivator.deactivate(self.uuid, False)
1483 try:
1484 self.sr.lvmCache.remove(self.lvname)
1485 self.sr.lock.cleanup(vdi_uuid, NS_PREFIX_LVM + sr_uuid)
1486 self.sr.lock.cleanupAll(vdi_uuid)
1487 except xs_errors.SRException as e:
1488 util.SMlog(
1489 "Failed to remove the volume (maybe is leaf coalescing) "
1490 "for %s err:%d" % (self.uuid, e.errno))
1492 self.sr._updateStats(self.sr.uuid, -self.size)
1493 self.sr._kickGC()
1494 return super(LVMVDI, self).delete(sr_uuid, vdi_uuid, data_only)
1496 @override
1497 def attach(self, sr_uuid, vdi_uuid) -> str:
1498 util.SMlog("LVMVDI.attach for %s" % self.uuid)
1499 if self.sr.journaler.hasJournals(self.uuid):
1500 raise xs_errors.XenError('VDIUnavailable',
1501 opterr='Interrupted operation detected on this VDI, '
1502 'scan SR first to trigger auto-repair')
1504 writable = ('args' not in self.sr.srcmd.params) or \
1505 (self.sr.srcmd.params['args'][0] == "true")
1506 needInflate = True
1507 if not VdiType.isCowImage(self.vdi_type) or not writable:
1508 needInflate = False
1509 else:
1510 self._loadThis()
1511 if self.utilisation >= self.lvmcowutil.calcVolumeSize(self.size):
1512 needInflate = False
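# A writable COW VDI on a thin-provisioned SR is stored deflated, so it
# must be inflated to its full provisioned size before I/O can proceed;
# RAW and read-only attaches never need this.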
1514 if needInflate:
1515 try:
1516 self._prepareThin(True, self.vdi_type)
1517 except:
1518 util.logException("attach")
1519 raise xs_errors.XenError('LVMProvisionAttach')
1521 try:
1522 return self._attach()
1523 finally:
1524 if not self.sr.lvActivator.deactivateAll():
1525 util.SMlog("Failed to deactivate LVs back (%s)" % self.uuid)
1527 @override
1528 def detach(self, sr_uuid, vdi_uuid) -> None:
1529 util.SMlog("LVMVDI.detach for %s" % self.uuid)
1530 self._loadThis()
1531 already_deflated = (self.utilisation < \
1532 self.lvmcowutil.calcVolumeSize(self.size))
1533 needDeflate = True
1534 if not VdiType.isCowImage(self.vdi_type) or already_deflated:
1535 needDeflate = False
1536 elif self.sr.provision == "thick":
1537 needDeflate = False
1538 # except for snapshots, which are always deflated
1539 if self.sr.srcmd.cmd != 'vdi_detach_from_config':
1540 vdi_ref = self.sr.srcmd.params['vdi_ref']
1541 snap = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref)
1542 if snap:
1543 needDeflate = True
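# Deflation on detach is the inverse of inflation on attach: thick
# provisioning keeps VDIs inflated across detach, except snapshots, which
# are always returned to their deflated size to reclaim space.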
1545 if needDeflate:
1546 try:
1547 self._prepareThin(False, self.vdi_type)
1548 except:
1549 util.logException("_prepareThin")
1550 raise xs_errors.XenError('VDIUnavailable', opterr='deflate')
1552 try:
1553 self._detach()
1554 finally:
1555 if not self.sr.lvActivator.deactivateAll():
1556 raise xs_errors.XenError("SMGeneral", opterr="deactivation")
1558 # We only support offline resize
1559 @override
1560 def resize(self, sr_uuid, vdi_uuid, size) -> str:
1561 util.SMlog("LVMVDI.resize for %s" % self.uuid)
1562 if not self.sr.isMaster:
1563 raise xs_errors.XenError('LVMMaster')
1565 self._loadThis()
1566 if self.hidden:
1567 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI')
1569 if size < self.size:
1570 util.SMlog('vdi_resize: shrinking not supported: ' + \
1571 '(current size: %d, new size: %d)' % (self.size, size))
1572 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed')
1574 size = self.cowutil.validateAndRoundImageSize(int(size))
1576 if size == self.size:
1577 return VDI.VDI.get_params(self)
1579 if not VdiType.isCowImage(self.vdi_type):
1580 lvSizeOld = self.size
1581 lvSizeNew = util.roundup(lvutil.LVM_SIZE_INCREMENT, size)
1582 else:
1583 lvSizeOld = self.utilisation
1584 lvSizeNew = self.lvmcowutil.calcVolumeSize(size)
1585 if self.sr.provision == "thin":
1586 # VDI is currently deflated, so keep it deflated
1587 lvSizeNew = lvSizeOld
1588 assert(lvSizeNew >= lvSizeOld)
1589 spaceNeeded = lvSizeNew - lvSizeOld
1590 self.sr._ensureSpaceAvailable(spaceNeeded)
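# For RAW volumes the LV itself must grow to the new virtual size; for COW
# images the LV grows to virtual size plus image overhead, unless the SR
# is thin-provisioned, in which case the LV stays deflated and only the
# virtual size recorded inside the image is changed below.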
1592 oldSize = self.size
1593 if not VdiType.isCowImage(self.vdi_type):
1594 self.sr.lvmCache.setSize(self.lvname, lvSizeNew)
1595 self.size = self.sr.lvmCache.getSize(self.lvname)
1596 self.utilisation = self.size
1597 else:
1598 if lvSizeNew != lvSizeOld:
1599 self.lvmcowutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid, self.vdi_type, lvSizeNew)
1600 self.cowutil.setSizeVirtFast(self.path, size)
1601 self.size = self.cowutil.getSizeVirt(self.path)
1602 self.utilisation = self.sr.lvmCache.getSize(self.lvname)
1604 vdi_ref = self.sr.srcmd.params['vdi_ref']
1605 self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size))
1606 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
1607 str(self.utilisation))
1608 self.sr._updateStats(self.sr.uuid, self.size - oldSize)
1609 super(LVMVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size)
1610 return VDI.VDI.get_params(self)
1612 @override
1613 def clone(self, sr_uuid, vdi_uuid) -> str:
1614 return self._do_snapshot(
1615 sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE, cloneOp=True)
1617 @override
1618 def compose(self, sr_uuid, vdi1, vdi2) -> None:
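# Relink this VDI onto vdi1 as its new parent, hide the parent and clear
# its managed flag, making this image a COW child of an existing base
# image. Only COW formats support this.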
1619 util.SMlog("LVMSR.compose for %s -> %s" % (vdi2, vdi1))
1620 if not VdiType.isCowImage(self.vdi_type):
1621 raise xs_errors.XenError('Unimplemented')
1623 parent_uuid = vdi1
1624 parent_lvname = LV_PREFIX[self.vdi_type] + parent_uuid
1625 assert(self.sr.lvmCache.checkLV(parent_lvname))
1626 parent_path = os.path.join(self.sr.path, parent_lvname)
1628 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
1629 self.sr.lvActivator.activate(parent_uuid, parent_lvname, False)
1631 self.cowutil.setParent(self.path, parent_path, False)
1632 self.cowutil.setHidden(parent_path)
1633 self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False)
1635 if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid,
1636 True):
1637 raise util.SMException("failed to refresh VDI %s" % self.uuid)
1639 util.SMlog("Compose done")
1641 def reset_leaf(self, sr_uuid, vdi_uuid):
1642 util.SMlog("LVMSR.reset_leaf for %s" % vdi_uuid)
1643 if not VdiType.isCowImage(self.vdi_type):
1644 raise xs_errors.XenError('Unimplemented')
1646 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
1648 # safety check
1649 if not self.cowutil.hasParent(self.path):
1650 raise util.SMException("ERROR: VDI %s has no parent, " + \
1651 "will not reset contents" % self.uuid)
1653 self.cowutil.killData(self.path)
1655 def _attach(self):
1656 self._chainSetActive(True, True, True)
1657 if not util.pathexists(self.path):
1658 raise xs_errors.XenError('VDIUnavailable', \
1659 opterr='Could not find: %s' % self.path)
1661 if not hasattr(self, 'xenstore_data'):
1662 self.xenstore_data = {}
1664 self.xenstore_data.update(scsiutil.update_XS_SCSIdata(self.uuid, \
1665 scsiutil.gen_synthetic_page_data(self.uuid)))
1667 self.xenstore_data['storage-type'] = 'lvm'
1668 self.xenstore_data['vdi-type'] = self.vdi_type
1670 self.attached = True
1671 self.sr.lvActivator.persist()
1672 return VDI.VDI.attach(self, self.sr.uuid, self.uuid)
1674 def _detach(self):
1675 self._chainSetActive(False, True)
1676 self.attached = False
1678 @override
1679 def _do_snapshot(self, sr_uuid, vdi_uuid, snapType,
1680 cloneOp=False, secondary=None, cbtlog=None, is_mirror_destination=False) -> str:
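# The snapshot protocol is pause -> snapshot -> unpause: tap_pause
# quiesces the datapath so the image chain can be re-linked safely, and
# pauses longer than LONG_SNAPTIME seconds are logged below.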
1681 # If cbt enabled, save file consistency state
1682 if cbtlog is not None:
1683 if blktap2.VDI.tap_status(self.session, vdi_uuid):
1684 consistency_state = False
1685 else:
1686 consistency_state = True
1687 util.SMlog("Saving log consistency state of %s for vdi: %s" %
1688 (consistency_state, vdi_uuid))
1689 else:
1690 consistency_state = None
1692 pause_time = time.time()
1693 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):
1694 raise util.SMException("failed to pause VDI %s" % vdi_uuid)
1696 snapResult = None
1697 try:
1698 snapResult = self._snapshot(snapType, cloneOp, cbtlog, consistency_state, is_mirror_destination)
1699 except Exception as e1:
1700 try:
1701 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid,
1702 secondary=None)
1703 except Exception as e2:
1704 util.SMlog('WARNING: failed to clean up failed snapshot: '
1705 '%s (error ignored)' % e2)
1706 raise
1707 self.disable_leaf_on_secondary(vdi_uuid, secondary=secondary)
1708 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)
1709 unpause_time = time.time()
1710 if (unpause_time - pause_time) > LONG_SNAPTIME:
1711 util.SMlog('WARNING: snapshot paused VM for %s seconds' %
1712 (unpause_time - pause_time))
1713 return snapResult
1715 def _snapshot(self, snapType, cloneOp=False, cbtlog=None, cbt_consistency=None, is_mirror_destination=False):
1716 util.SMlog("LVMVDI._snapshot for %s (type %s)" % (self.uuid, snapType))
1718 if not self.sr.isMaster:
1719 raise xs_errors.XenError('LVMMaster')
1720 if self.sr.legacyMode:
1721 raise xs_errors.XenError('Unimplemented', opterr='In legacy mode')
1723 self._loadThis()
1724 if self.hidden:
1725 raise xs_errors.XenError('VDISnapshot', opterr='hidden VDI')
1727 snapVdiType = self.sr._get_snap_vdi_type(self.vdi_type, self.size)
1729 self.sm_config = self.session.xenapi.VDI.get_sm_config( \
1730 self.sr.srcmd.params['vdi_ref'])
1731 if "type" in self.sm_config and self.sm_config['type'] == 'raw': 1731 ↛ 1732line 1731 didn't jump to line 1732, because the condition on line 1731 was never true
1732 if not util.fistpoint.is_active("testsm_clone_allow_raw"):
1733 raise xs_errors.XenError('Unimplemented', \
1734 opterr='Raw VDI, snapshot or clone not permitted')
1736 # we must activate the entire image chain because the real parent could
1737 # theoretically be anywhere in the chain if all images under it are empty
1738 self._chainSetActive(True, False)
1739 if not util.pathexists(self.path):
1740 raise xs_errors.XenError('VDIUnavailable', \
1741 opterr='VDI unavailable: %s' % (self.path))
1743 if VdiType.isCowImage(self.vdi_type):
1744 depth = self.cowutil.getDepth(self.path)
1745 if depth == -1:
1746 raise xs_errors.XenError('VDIUnavailable', \
1747 opterr='failed to get COW depth')
1748 elif depth >= self.cowutil.getMaxChainLength():
1749 raise xs_errors.XenError('SnapshotChainTooLong')
1751 self.issnap = self.session.xenapi.VDI.get_is_a_snapshot( \
1752 self.sr.srcmd.params['vdi_ref'])
1754 fullpr = self.lvmcowutil.calcVolumeSize(self.size)
1755 thinpr = util.roundup(
1756 lvutil.LVM_SIZE_INCREMENT,
1757 self.cowutil.calcOverheadEmpty(max(self.size, self.cowutil.getDefaultPreallocationSizeVirt()))
1758 )
1759 lvSizeOrig = thinpr
1760 lvSizeClon = thinpr
1762 hostRefs = []
1763 if self.sr.cmd == "vdi_snapshot":
1764 hostRefs = util.get_hosts_attached_on(self.session, [self.uuid])
1765 if hostRefs:
1766 lvSizeOrig = fullpr
1767 if self.sr.provision == "thick":
1768 if not self.issnap:
1769 lvSizeOrig = fullpr
1770 if self.sr.cmd != "vdi_snapshot":
1771 lvSizeClon = fullpr
1773 if (snapType == VDI.SNAPSHOT_SINGLE or
1774 snapType == VDI.SNAPSHOT_INTERNAL):
1775 lvSizeClon = 0
1777 # the space required must include 2 journal LVs: a clone journal and an
1778 # inflate journal (for failure handling)
1779 size_req = lvSizeOrig + lvSizeClon + 2 * self.sr.journaler.LV_SIZE
1780 lvSizeBase = self.size
1781 if VdiType.isCowImage(self.vdi_type):
1782 lvSizeBase = util.roundup(lvutil.LVM_SIZE_INCREMENT, self.cowutil.getSizePhys(self.path))
1783 size_req -= (self.utilisation - lvSizeBase)
1784 self.sr._ensureSpaceAvailable(size_req)
1786 if hostRefs:
1787 self.sr._deactivateOnSlave(hostRefs, self.lvname)
1789 baseUuid = util.gen_uuid()
1790 origUuid = self.uuid
1791 clonUuid = ""
1792 if snapType == VDI.SNAPSHOT_DOUBLE:
1793 clonUuid = util.gen_uuid()
1794 jval = "%s_%s" % (baseUuid, clonUuid)
1795 self.sr.journaler.create(self.JRN_CLONE, origUuid, jval)
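# The journal value encodes both new UUIDs as "<baseUuid>_<clonUuid>"
# (clonUuid is empty for single/internal snapshots), so an interrupted
# clone can be rolled back or completed when the SR is next scanned.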
1796 util.fistpoint.activate("LVHDRT_clone_vdi_after_create_journal", self.sr.uuid)
1798 try:
1799 # self becomes the "base vdi"
1800 origOldLV = self.lvname
1801 baseLV = LV_PREFIX[self.vdi_type] + baseUuid
1802 self.sr.lvmCache.rename(self.lvname, baseLV)
1803 self.sr.lvActivator.replace(self.uuid, baseUuid, baseLV, False)
1804 RefCounter.set(baseUuid, 1, 0, NS_PREFIX_LVM + self.sr.uuid)
1805 self.uuid = baseUuid
1806 self.lvname = baseLV
1807 self.path = os.path.join(self.sr.path, baseLV)
1808 self.label = "base copy"
1809 self.read_only = True
1810 self.location = self.uuid
1811 self.managed = False
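# The original LV has just been renamed: this object now represents the
# read-only "base copy", and fresh leaf LVs are created below under the
# original (and, for a double snapshot, the clone) UUIDs.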
1813 # shrink the base copy to the minimum - we do it before creating
1814 # the snapshot volumes to avoid requiring double the space
1815 if VdiType.isCowImage(self.vdi_type):
1816 self.lvmcowutil.deflate(self.sr.lvmCache, self.lvname, lvSizeBase)
1817 self.utilisation = lvSizeBase
1818 util.fistpoint.activate("LVHDRT_clone_vdi_after_shrink_parent", self.sr.uuid)
1820 snapVDI = self._createSnap(origUuid, snapVdiType, lvSizeOrig, False, is_mirror_destination)
1821 util.fistpoint.activate("LVHDRT_clone_vdi_after_first_snap", self.sr.uuid)
1822 snapVDI2 = None
1823 if snapType == VDI.SNAPSHOT_DOUBLE:
1824 snapVDI2 = self._createSnap(clonUuid, snapVdiType, lvSizeClon, True)
1825 # If we have CBT enabled on the VDI,
1826 # set CBT status for the new snapshot disk
1827 if cbtlog:
1828 snapVDI2.cbt_enabled = True
1829 util.fistpoint.activate("LVHDRT_clone_vdi_after_second_snap", self.sr.uuid)
1831 # note: it is important to mark the parent hidden only AFTER the
1832 # new image children referencing it have been created;
1833 # otherwise we would introduce a race with GC that could reclaim
1834 # the parent before we snapshot it
1835 if not VdiType.isCowImage(self.vdi_type):
1836 self.sr.lvmCache.setHidden(self.lvname)
1837 else:
1838 self.cowutil.setHidden(self.path)
1839 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_hidden", self.sr.uuid)
1841 # set the base copy to ReadOnly
1842 self.sr.lvmCache.setReadonly(self.lvname, True)
1843 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_ro", self.sr.uuid)
1845 if hostRefs:
1846 self.sr._updateSlavesOnClone(hostRefs, origOldLV,
1847 snapVDI.lvname, self.uuid, self.lvname)
1849 # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE)
1850 if snapType == VDI.SNAPSHOT_DOUBLE and cbtlog:
1851 snapVDI._cbt_snapshot(clonUuid, cbt_consistency)
1852 if hostRefs:
1853 cbtlog_file = self._get_cbt_logname(snapVDI.uuid)
1854 try:
1855 self.sr._updateSlavesOnCBTClone(hostRefs, cbtlog_file)
1856 except:
1857 alert_name = "VDI_CBT_SNAPSHOT_FAILED"
1858 alert_str = ("Creating CBT snapshot for {} failed"
1859 .format(snapVDI.uuid))
1860 snapVDI._disable_cbt_on_error(alert_name, alert_str)
1861 pass
1863 except (util.SMException, XenAPI.Failure) as e:
1864 util.logException("LVMVDI._snapshot")
1865 self._failClone(origUuid, jval, str(e))
1866 util.fistpoint.activate("LVHDRT_clone_vdi_before_remove_journal", self.sr.uuid)
1868 self.sr.journaler.remove(self.JRN_CLONE, origUuid)
1870 return self._finishSnapshot(snapVDI, snapVDI2, hostRefs, cloneOp, snapType)
1872 def _createSnap(self, snapUuid, snapVdiType, snapSizeLV, isNew, is_mirror_destination=False):
1873 """Snapshot self and return the snapshot VDI object"""
1875 snapLV = LV_PREFIX[snapVdiType] + snapUuid
1876 snapPath = os.path.join(self.sr.path, snapLV)
1877 self.sr.lvmCache.create(snapLV, int(snapSizeLV))
1878 util.fistpoint.activate("LVHDRT_clone_vdi_after_lvcreate", self.sr.uuid)
1879 if isNew:
1880 RefCounter.set(snapUuid, 1, 0, NS_PREFIX_LVM + self.sr.uuid)
1881 self.sr.lvActivator.add(snapUuid, snapLV, False)
1882 parentRaw = (self.vdi_type == VdiType.RAW)
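# parentRaw tells cowutil.snapshot that the parent is a RAW LV rather
# than a COW image, so the child can reference it accordingly.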
1883 self.cowutil.snapshot(
1884 snapPath, self.path, parentRaw, max(self.size, self.cowutil.getDefaultPreallocationSizeVirt()), is_mirror_image=is_mirror_destination
1885 )
1886 snapParent = self.cowutil.getParent(snapPath, LvmCowUtil.extractUuid)
1888 snapVDI = LVMVDI(self.sr, snapUuid)
1889 snapVDI.read_only = False
1890 snapVDI.location = snapUuid
1891 snapVDI.size = self.size
1892 snapVDI.utilisation = snapSizeLV
1893 snapVDI.sm_config = dict()
1894 for key, val in self.sm_config.items():
1895 if key not in [
1896 "type", "vdi_type", "vhd-parent", "paused", "relinking", "activating"] and \
1897 not key.startswith("host_"):
1898 snapVDI.sm_config[key] = val
1899 snapVDI.sm_config["vdi_type"] = snapVdiType
1900 snapVDI.sm_config["vhd-parent"] = snapParent
1901 # TODO: fix the raw snapshot case
1902 snapVDI.sm_config["image-format"] = getImageStringFromVdiType(self.vdi_type)
1903 snapVDI.lvname = snapLV
1904 return snapVDI
1906 def _finishSnapshot(self, snapVDI, snapVDI2, hostRefs, cloneOp=False, snapType=None):
1907 if snapType != VDI.SNAPSHOT_INTERNAL:
1908 self.sr._updateStats(self.sr.uuid, self.size)
1909 basePresent = True
1911 # Verify parent locator field of both children and delete basePath if
1912 # unused
1913 snapParent = snapVDI.sm_config["vhd-parent"]
1914 snap2Parent = ""
1915 if snapVDI2:
1916 snap2Parent = snapVDI2.sm_config["vhd-parent"]
1917 if snapParent != self.uuid and \
1918 (not snapVDI2 or snap2Parent != self.uuid):
1919 util.SMlog("%s != %s != %s => deleting unused base %s" % \
1920 (snapParent, self.uuid, snap2Parent, self.lvname))
1921 RefCounter.put(self.uuid, False, NS_PREFIX_LVM + self.sr.uuid)
1923 # The removed LV could still be activated on a slave host if it's
1924 # part of a VM currently running there, we need to deactivate it
1925 # before it gets removed to avoid a LV leak.
1926 if hostRefs:
1927 self.sr._deactivateOnSlave(hostRefs, self.lvname)
1929 self.sr.lvmCache.remove(self.lvname)
1930 self.sr.lvActivator.remove(self.uuid, False)
1931 if hostRefs:
1932 self.sr._updateSlavesOnRemove(hostRefs, self.uuid, self.lvname)
1933 basePresent = False
1934 else:
1935 # assign the _binary_ refcount of the original VDI to the new base
1936 # VDI (but as the normal refcount, since binary refcounts are only
1937 # for leaf nodes). The normal refcount of the child is not
1938 # transferred to the base VDI because normal refcounts are
1939 # incremented and decremented individually, and not based on the
1940 # image chain (i.e., the child's normal refcount will be decremented
1941 # independently of its parent situation). Add 1 for this clone op.
1942 # Note that we do not need to protect the refcount operations
1943 # below with per-VDI locking like we do in lvutil because at this
1944 # point we have exclusive access to the VDIs involved. Other SM
1945 # operations are serialized by the Agent or with the SR lock, and
1946 # any coalesce activations are serialized with the SR lock. (The
1947 # coalesce activates the coalesced VDI pair in the beginning, which
1948 # cannot affect the VDIs here because they cannot possibly be
1949 # involved in coalescing at this point, and at the relinkSkip step
1950 # that activates the children, which takes the SR lock.)
1951 ns = NS_PREFIX_LVM + self.sr.uuid
1952 (cnt, bcnt) = RefCounter.check(snapVDI.uuid, ns)
1953 RefCounter.set(self.uuid, bcnt + 1, 0, ns)
1955 # the "paused" and "host_*" sm-config keys are special and must stay on
1956 # the leaf without being inherited by anyone else
1957 for key in [x for x in self.sm_config.keys() if x == "paused" or x.startswith("host_")]:
1958 snapVDI.sm_config[key] = self.sm_config[key]
1959 del self.sm_config[key]
1961 # Introduce any new VDI records & update the existing one
1962 type = self.session.xenapi.VDI.get_type( \
1963 self.sr.srcmd.params['vdi_ref'])
1964 if snapVDI2:
1965 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1966 vdiRef = snapVDI2._db_introduce()
1967 if cloneOp:
1968 vdi_info = {UUID_TAG: snapVDI2.uuid,
1969 NAME_LABEL_TAG: util.to_plain_string( \
1970 self.session.xenapi.VDI.get_name_label( \
1971 self.sr.srcmd.params['vdi_ref'])),
1972 NAME_DESCRIPTION_TAG: util.to_plain_string( \
1973 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
1974 IS_A_SNAPSHOT_TAG: 0,
1975 SNAPSHOT_OF_TAG: '',
1976 SNAPSHOT_TIME_TAG: '',
1977 TYPE_TAG: type,
1978 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
1979 READ_ONLY_TAG: 0,
1980 MANAGED_TAG: int(snapVDI2.managed),
1981 METADATA_OF_POOL_TAG: ''
1982 }
1983 else:
1984 util.SMlog("snapshot VDI params: %s" % \
1985 self.session.xenapi.VDI.get_snapshot_time(vdiRef))
1986 vdi_info = {UUID_TAG: snapVDI2.uuid,
1987 NAME_LABEL_TAG: util.to_plain_string( \
1988 self.session.xenapi.VDI.get_name_label( \
1989 self.sr.srcmd.params['vdi_ref'])),
1990 NAME_DESCRIPTION_TAG: util.to_plain_string( \
1991 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
1992 IS_A_SNAPSHOT_TAG: 1,
1993 SNAPSHOT_OF_TAG: snapVDI.uuid,
1994 SNAPSHOT_TIME_TAG: '',
1995 TYPE_TAG: type,
1996 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
1997 READ_ONLY_TAG: 0,
1998 MANAGED_TAG: int(snapVDI2.managed),
1999 METADATA_OF_POOL_TAG: ''
2000 }
2002 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
2003 util.SMlog("vdi_clone: introduced 2nd snap VDI: %s (%s)" % \
2004 (vdiRef, snapVDI2.uuid))
2006 if basePresent:
2007 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
2008 vdiRef = self._db_introduce()
2009 vdi_info = {UUID_TAG: self.uuid,
2010 NAME_LABEL_TAG: self.label,
2011 NAME_DESCRIPTION_TAG: self.description,
2012 IS_A_SNAPSHOT_TAG: 0,
2013 SNAPSHOT_OF_TAG: '',
2014 SNAPSHOT_TIME_TAG: '',
2015 TYPE_TAG: type,
2016 VDI_TYPE_TAG: self.sm_config['vdi_type'],
2017 READ_ONLY_TAG: 1,
2018 MANAGED_TAG: 0,
2019 METADATA_OF_POOL_TAG: ''
2020 }
2022 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
2023 util.SMlog("vdi_clone: introduced base VDI: %s (%s)" % \
2024 (vdiRef, self.uuid))
2026 # Update the original record
2027 vdi_ref = self.sr.srcmd.params['vdi_ref']
2028 self.session.xenapi.VDI.set_sm_config(vdi_ref, snapVDI.sm_config)
2029 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, \
2030 str(snapVDI.utilisation))
2032 # Return the info on the new snap VDI
2033 snap = snapVDI2
2034 if not snap:
2035 snap = self
2036 if not basePresent:
2037 # a single-snapshot of an empty VDI will be a noop, resulting
2038 # in no new VDIs, so return the existing one. The GC wouldn't
2039 # normally try to single-snapshot an empty image of course, but
2040 # if an external snapshot operation manages to sneak in right
2041 # before a snapshot-coalesce phase, we would get here
2042 snap = snapVDI
2043 return snap.get_params()
2045 def _setType(self, vdiType: str) -> None:
2046 self.vdi_type = vdiType
2047 self.cowutil = getCowUtil(self.vdi_type)
2048 self.lvmcowutil = LvmCowUtil(self.cowutil)
2050 def _initFromVDIInfo(self, vdiInfo):
2051 self._setType(vdiInfo.vdiType)
2052 self.lvname = vdiInfo.lvName
2053 self.size = vdiInfo.sizeVirt
2054 self.utilisation = vdiInfo.sizeLV
2055 self.hidden = vdiInfo.hidden
2056 if self.hidden:
2057 self.managed = False
2058 self.active = vdiInfo.lvActive
2059 self.readonly = vdiInfo.lvReadonly
2060 self.parent = vdiInfo.parentUuid
2061 self.path = os.path.join(self.sr.path, self.lvname)
2062 if hasattr(self, "sm_config_override"):
2063 self.sm_config_override["vdi_type"] = self.vdi_type
2064 else:
2065 self.sm_config_override = {'vdi_type': self.vdi_type}
2066 self.loaded = True
2068 def _initFromLVInfo(self, lvInfo):
2069 self._setType(lvInfo.vdiType)
2070 self.lvname = lvInfo.name
2071 self.size = lvInfo.size
2072 self.utilisation = lvInfo.size
2073 self.hidden = lvInfo.hidden
2074 self.active = lvInfo.active
2075 self.readonly = lvInfo.readonly
2076 self.parent = ''
2077 self.path = os.path.join(self.sr.path, self.lvname)
2078 if hasattr(self, "sm_config_override"):
2079 self.sm_config_override["vdi_type"] = self.vdi_type
2080 else:
2081 self.sm_config_override = {'vdi_type': self.vdi_type}
2082 if 'vhd-parent' in self.sm_config_override:
2083 self.parent = self.sm_config_override['vhd-parent']
2084 if not VdiType.isCowImage(self.vdi_type):
2085 self.loaded = True
2087 def _initFromImageInfo(self, imageInfo):
2088 self.size = imageInfo.sizeVirt
2089 if self.parent == '' or (imageInfo.parentUuid != '' and imageInfo.parentUuid != self.parent):
2090 self.parent = imageInfo.parentUuid
2091 self.hidden = imageInfo.hidden
2092 self.loaded = True
2094 def _determineType(self):
2095 """
2096 Determine whether this is a RAW or a COW VDI.
2097 """
2098 if "vdi_ref" in self.sr.srcmd.params:
2099 vdi_ref = self.sr.srcmd.params["vdi_ref"]
2100 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
2101 if sm_config.get("vdi_type"):
2102 self._setType(sm_config["vdi_type"])
2103 prefix = LV_PREFIX[self.vdi_type]
2104 self.lvname = "%s%s" % (prefix, self.uuid)
2105 self.path = os.path.join(self.sr.path, self.lvname)
2106 self.sm_config_override = sm_config
2107 return True
2109 # LVM commands can be costly, so check the file directly first in case
2110 # the LV is active
2111 found = False
2112 for vdi_type, prefix in LV_PREFIX.items():
2113 lvname = "%s%s" % (prefix, self.uuid)
2114 path = os.path.join(self.sr.path, lvname)
2115 if util.pathexists(path):
2116 if found:
2117 raise xs_errors.XenError('VDILoad',
2118 opterr="multiple VDI's: uuid %s" % self.uuid)
2119 found = True
2120 self._setType(vdi_type)
2121 self.lvname = lvname
2122 self.path = path
2123 if found:
2124 return True
2126 # now list all LV's
2127 if not lvutil._checkVG(self.sr.vgname):
2128 # when doing attach_from_config, the VG won't be there yet
2129 return False
2131 lvs = LvmCowUtil.getVolumeInfo(self.sr.lvmCache)
2132 if lvs.get(self.uuid):
2133 self._initFromLVInfo(lvs[self.uuid])
2134 return True
2135 return False
2137 def _loadThis(self):
2138 """
2139 Load VDI info for this VDI and activate the LV if it's COW. We
2140 don't do it in VDI.load() because not all VDI operations need it.
2141 """
2142 if self.loaded:
2143 if VdiType.isCowImage(self.vdi_type):
2144 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
2145 return
2146 try:
2147 lvs = LvmCowUtil.getVolumeInfo(self.sr.lvmCache, self.lvname)
2148 except util.CommandException as e:
2149 raise xs_errors.XenError('VDIUnavailable',
2150 opterr='%s (LV scan error)' % os.strerror(abs(e.code)))
2151 if not lvs.get(self.uuid):
2152 raise xs_errors.XenError('VDIUnavailable', opterr='LV not found')
2153 self._initFromLVInfo(lvs[self.uuid])
2154 if VdiType.isCowImage(self.vdi_type):
2155 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
2156 imageInfo = self.cowutil.getInfo(self.path, LvmCowUtil.extractUuid, False)
2157 if not imageInfo:
2158 raise xs_errors.XenError('VDIUnavailable', opterr='getInfo failed')
2159 self._initFromImageInfo(imageInfo)
2160 self.loaded = True
2162 def _chainSetActive(self, active, binary, persistent=False):
2163 if binary:
2164 (count, bcount) = RefCounter.checkLocked(self.uuid,
2165 NS_PREFIX_LVM + self.sr.uuid)
2166 if (active and bcount > 0) or (not active and bcount == 0):
2167 return # this is a redundant activation/deactivation call
2169 vdiList = {self.uuid: self.lvname}
2170 if VdiType.isCowImage(self.vdi_type):
2171 vdiList = self.cowutil.getParentChain(self.lvname, LvmCowUtil.extractUuid, self.sr.vgname)
2172 for uuid, lvName in vdiList.items():
2173 binaryParam = binary
2174 if uuid != self.uuid:
2175 binaryParam = False # binary param only applies to leaf nodes
2176 if active:
2177 self.sr.lvActivator.activate(uuid, lvName, binaryParam,
2178 persistent)
2179 else:
2180 # just add the LVs for deactivation in the final (cleanup)
2181 # step. The LVs must not have been activated during the current
2182 # operation
2183 self.sr.lvActivator.add(uuid, lvName, binaryParam)
2185 def _failClone(self, uuid, jval, msg):
2186 try:
2187 self.sr._handleInterruptedCloneOp(uuid, jval, True)
2188 self.sr.journaler.remove(self.JRN_CLONE, uuid)
2189 except Exception as e:
2190 util.SMlog('WARNING: failed to clean up failed snapshot: ' \
2191 ' %s (error ignored)' % e)
2192 raise xs_errors.XenError('VDIClone', opterr=msg)
2194 def _markHidden(self):
2195 if not VdiType.isCowImage(self.vdi_type):
2196 self.sr.lvmCache.setHidden(self.lvname)
2197 else:
2198 self.cowutil.setHidden(self.path)
2199 self.hidden = 1
2201 def _prepareThin(self, attach, vdiType):
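# Inflate/deflate is coordinated through the pool master: the master acts
# locally, while a slave delegates via the THIN_PLUGIN host plugin and
# then refreshes its local view of the LV so the size change is visible.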
2202 origUtilisation = self.sr.lvmCache.getSize(self.lvname)
2203 if self.sr.isMaster:
2204 # the master can prepare the VDI locally
2205 if attach:
2206 self.lvmcowutil.attachThin(self.sr.journaler, self.sr.uuid, self.uuid, self.vdi_type)
2207 else:
2208 self.lvmcowutil.detachThin(self.session, self.sr.lvmCache, self.sr.uuid, self.uuid, self.vdi_type)
2209 else:
2210 fn = "attach"
2211 if not attach:
2212 fn = "detach"
2213 pools = self.session.xenapi.pool.get_all()
2214 master = self.session.xenapi.pool.get_master(pools[0])
2215 rv = self.session.xenapi.host.call_plugin(
2216 master,
2217 self.sr.THIN_PLUGIN,
2218 fn,
2219 {
2220 "srUuid": self.sr.uuid,
2221 "vdiUuid": self.uuid,
2222 "vdiType": vdiType
2223 }
2224 )
2225 util.SMlog("call-plugin returned: %s" % rv)
2226 if not rv:
2227 raise Exception('plugin %s failed' % self.sr.THIN_PLUGIN)
2228 # refresh to pick up the size change on this slave
2229 self.sr.lvmCache.activateNoRefcount(self.lvname, True)
2231 self.utilisation = self.sr.lvmCache.getSize(self.lvname)
2232 if origUtilisation != self.utilisation:
2233 vdi_ref = self.sr.srcmd.params['vdi_ref']
2234 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
2235 str(self.utilisation))
2236 stats = lvutil._getVGstats(self.sr.vgname)
2237 sr_utilisation = stats['physical_utilisation']
2238 self.session.xenapi.SR.set_physical_utilisation(self.sr.sr_ref,
2239 str(sr_utilisation))
2241 @override
2242 def update(self, sr_uuid, vdi_uuid) -> None:
2243 if self.sr.legacyMode:
2244 return
2246 # Sync the name_label of this VDI on storage with the name_label in XAPI
2247 vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid)
2248 update_map = {}
2249 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
2250 METADATA_OBJECT_TYPE_VDI
2251 update_map[UUID_TAG] = self.uuid
2252 update_map[NAME_LABEL_TAG] = util.to_plain_string( \
2253 self.session.xenapi.VDI.get_name_label(vdi_ref))
2254 update_map[NAME_DESCRIPTION_TAG] = util.to_plain_string( \
2255 self.session.xenapi.VDI.get_name_description(vdi_ref))
2256 update_map[SNAPSHOT_TIME_TAG] = \
2257 self.session.xenapi.VDI.get_snapshot_time(vdi_ref)
2258 update_map[METADATA_OF_POOL_TAG] = \
2259 self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref)
2260 LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map)
2262 @override
2263 def _ensure_cbt_space(self) -> None:
2264 # We need virtual_size to compute the CBT log size, which can be larger for big VDIs
2265 self.sr.ensureCBTSpace(self.size)
2267 @override
2268 def _create_cbt_log(self) -> str:
2269 logname = self._get_cbt_logname(self.uuid)
2270 logsize = max(util.roundup(CBT_BLOCK_SIZE, self.size//CBT_BLOCK_SIZE), self.sr.journaler.LV_SIZE)
2271 # We choose 4MiB as the minimum log size to preserve the old behaviour, and
2272 # compute a larger amount when the CBT log needs a bigger LV (possible with big QCOW2 images)
2273 self.sr.lvmCache.create(logname, logsize, CBTLOG_TAG)
2274 logpath = super(LVMVDI, self)._create_cbt_log()
2275 self.sr.lvmCache.deactivateNoRefcount(logname)
2276 return logpath
2278 @override
2279 def _delete_cbt_log(self) -> None:
2280 logpath = self._get_cbt_logpath(self.uuid)
2281 if self._cbt_log_exists(logpath):
2282 logname = self._get_cbt_logname(self.uuid)
2283 self.sr.lvmCache.remove(logname)
2285 @override
2286 def _rename(self, oldpath, newpath) -> None:
2287 oldname = os.path.basename(oldpath)
2288 newname = os.path.basename(newpath)
2289 self.sr.lvmCache.rename(oldname, newname)
2291 @override
2292 def update_slaves_on_cbt_disable(self, cbtlog) -> None:
2293 args = {
2294 "vgName": self.sr.vgname,
2295 "action1": "deactivateNoRefcount",
2296 "lvName1": cbtlog
2297 }
2299 host_refs = util.get_hosts_attached_on(self.session, [self.uuid])
2301 message = f"Deactivating {cbtlog}"
2302 self.sr.call_on_slave(args, host_refs, message)
2304 @override
2305 def _activate_cbt_log(self, lv_name) -> bool:
2306 self.sr.lvmCache.refresh()
2307 if not self.sr.lvmCache.is_active(lv_name):
2308 try:
2309 self.sr.lvmCache.activateNoRefcount(lv_name)
2310 return True
2311 except Exception as e:
2312 util.SMlog("Exception in _activate_cbt_log, "
2313 "Error: %s." % str(e))
2314 raise
2315 else:
2316 return False
2318 @override
2319 def _deactivate_cbt_log(self, lv_name) -> None:
2320 try:
2321 self.sr.lvmCache.deactivateNoRefcount(lv_name)
2322 except Exception as e:
2323 util.SMlog("Exception in _deactivate_cbt_log, Error: %s." % str(e))
2324 raise
2326 @override
2327 def _cbt_log_exists(self, logpath) -> bool:
2328 return lvutil.exists(logpath)
2330if __name__ == '__main__':
2331 SRCommand.run(LVMSR, DRIVER_INFO)
2332else:
2333 SR.registerSR(LVMSR)