
1#!/usr/bin/python3 

2# 

3# Copyright (C) Citrix Systems Inc. 

4# 

5# This program is free software; you can redistribute it and/or modify 

6# it under the terms of the GNU Lesser General Public License as published 

7# by the Free Software Foundation; version 2.1 only. 

8# 

9# This program is distributed in the hope that it will be useful, 

10# but WITHOUT ANY WARRANTY; without even the implied warranty of 

11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

12# GNU Lesser General Public License for more details. 

13# 

14# You should have received a copy of the GNU Lesser General Public License 

15# along with this program; if not, write to the Free Software Foundation, Inc., 

16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 

17# 

18# LVHDSR: VHD on LVM storage repository 

19# 

20 

21from sm_typing import Dict, List, override 

22 

23import SR 

24from SR import deviceCheck 

25import VDI 

26import SRCommand 

27import util 

28import lvutil 

29import lvmcache 

30import vhdutil 

31import lvhdutil 

32import scsiutil 

33import os 

34import sys 

35import time 

36import errno 

37import xs_errors 

38import cleanup 

39import blktap2 

40from journaler import Journaler 

41from lock import Lock 

42from refcounter import RefCounter 

43from ipc import IPCFlag 

44from lvmanager import LVActivator 

45import XenAPI # pylint: disable=import-error 

46import re 

47from srmetadata import ALLOCATION_TAG, NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, \ 

48 UUID_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, TYPE_TAG, VDI_TYPE_TAG, \ 

49 READ_ONLY_TAG, MANAGED_TAG, SNAPSHOT_TIME_TAG, METADATA_OF_POOL_TAG, \ 

50 LVMMetadataHandler, METADATA_OBJECT_TYPE_VDI, \ 

51 METADATA_OBJECT_TYPE_SR, METADATA_UPDATE_OBJECT_TYPE_TAG 

52from metadata import retrieveXMLfromFile, _parseXML 

53from xmlrpc.client import DateTime 

54import glob 

55from constants import CBTLOG_TAG 

56from fairlock import Fairlock 

57DEV_MAPPER_ROOT = os.path.join('/dev/mapper', lvhdutil.VG_PREFIX) 

58 

59geneology: Dict[str, List[str]] = {} 

60CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_TRIM", 

61 "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", "VDI_MIRROR", 

62 "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE", 

63 "VDI_RESET_ON_BOOT/2", "VDI_UPDATE", "VDI_CONFIG_CBT", 

64 "VDI_ACTIVATE", "VDI_DEACTIVATE"] 

65 

66CONFIGURATION = [['device', 'local device path (required) (e.g. /dev/sda3)']] 

67 

68DRIVER_INFO = { 

69 'name': 'Local VHD on LVM', 

70 'description': 'SR plugin which represents disks as VHD disks on ' + \ 

71 'Logical Volumes within a locally-attached Volume Group', 

72 'vendor': 'XenSource Inc', 

73 'copyright': '(C) 2008 XenSource Inc', 

74 'driver_version': '1.0', 

75 'required_api_version': '1.0', 

76 'capabilities': CAPABILITIES, 

77 'configuration': CONFIGURATION 

78 } 

79 

80PARAM_VHD = "vhd" 

81PARAM_RAW = "raw" 

82 

83OPS_EXCLUSIVE = [ 

84 "sr_create", "sr_delete", "sr_attach", "sr_detach", "sr_scan", 

85 "sr_update", "vdi_create", "vdi_delete", "vdi_resize", "vdi_snapshot", 

86 "vdi_clone"] 

87 

88# Log if snapshot pauses VM for more than this many seconds 

89LONG_SNAPTIME = 60 

90 

91class LVHDSR(SR.SR): 

92 DRIVER_TYPE = 'lvhd' 

93 

94 PROVISIONING_TYPES = ["thin", "thick"] 

95 PROVISIONING_DEFAULT = "thick" 

96 THIN_PLUGIN = "lvhd-thin" 

97 

98 PLUGIN_ON_SLAVE = "on-slave" 

99 

100 FLAG_USE_VHD = "use_vhd" 

101 MDVOLUME_NAME = "MGT" 

102 

103 ALLOCATION_QUANTUM = "allocation_quantum" 

104 INITIAL_ALLOCATION = "initial_allocation" 

105 

106 LOCK_RETRY_INTERVAL = 3 

107 LOCK_RETRY_ATTEMPTS = 10 

108 

109 TEST_MODE_KEY = "testmode" 

110 TEST_MODE_VHD_FAIL_REPARENT_BEGIN = "vhd_fail_reparent_begin" 

111 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR = "vhd_fail_reparent_locator" 

112 TEST_MODE_VHD_FAIL_REPARENT_END = "vhd_fail_reparent_end" 

113 TEST_MODE_VHD_FAIL_RESIZE_BEGIN = "vhd_fail_resize_begin" 

114 TEST_MODE_VHD_FAIL_RESIZE_DATA = "vhd_fail_resize_data" 

115 TEST_MODE_VHD_FAIL_RESIZE_METADATA = "vhd_fail_resize_metadata" 

116 TEST_MODE_VHD_FAIL_RESIZE_END = "vhd_fail_resize_end" 

117 

118 ENV_VAR_VHD_TEST = { 

119 TEST_MODE_VHD_FAIL_REPARENT_BEGIN: 

120 "VHD_UTIL_TEST_FAIL_REPARENT_BEGIN", 

121 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR: 

122 "VHD_UTIL_TEST_FAIL_REPARENT_LOCATOR", 

123 TEST_MODE_VHD_FAIL_REPARENT_END: 

124 "VHD_UTIL_TEST_FAIL_REPARENT_END", 

125 TEST_MODE_VHD_FAIL_RESIZE_BEGIN: 

126 "VHD_UTIL_TEST_FAIL_RESIZE_BEGIN", 

127 TEST_MODE_VHD_FAIL_RESIZE_DATA: 

128 "VHD_UTIL_TEST_FAIL_RESIZE_DATA_MOVED", 

129 TEST_MODE_VHD_FAIL_RESIZE_METADATA: 

130 "VHD_UTIL_TEST_FAIL_RESIZE_METADATA_MOVED", 

131 TEST_MODE_VHD_FAIL_RESIZE_END: 

132 "VHD_UTIL_TEST_FAIL_RESIZE_END" 

133 } 

134 testMode = "" 

135 

136 legacyMode = True 

137 

138 @override 

139 @staticmethod 

140 def handles(type) -> bool: 

141 """Returns True if this SR class understands the given dconf string""" 

142 # we can pose as LVMSR or EXTSR for compatibility purposes 

143 if __name__ == '__main__': 

144 name = sys.argv[0] 

145 else: 

146 name = __name__ 

147 if name.endswith("LVMSR"): 

148 return type == "lvm" 

149 elif name.endswith("EXTSR"): 

150 return type == "ext" 

151 return type == LVHDSR.DRIVER_TYPE 

152 

153 @override 

154 def load(self, sr_uuid) -> None: 

155 self.ops_exclusive = OPS_EXCLUSIVE 

156 

157 self.isMaster = False 

158 if 'SRmaster' in self.dconf and self.dconf['SRmaster'] == 'true': 

159 self.isMaster = True 

160 

161 self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid) 

162 self.sr_vditype = SR.DEFAULT_TAP 

163 self.uuid = sr_uuid 

164 self.vgname = lvhdutil.VG_PREFIX + self.uuid 

165 self.path = os.path.join(lvhdutil.VG_LOCATION, self.vgname) 

166 self.mdpath = os.path.join(self.path, self.MDVOLUME_NAME) 

167 self.provision = self.PROVISIONING_DEFAULT 

168 

169 has_sr_ref = self.srcmd.params.get("sr_ref") 

170 if has_sr_ref: 

171 self.other_conf = self.session.xenapi.SR.get_other_config(self.sr_ref) 

172 else: 

173 self.other_conf = None 

174 

175 self.lvm_conf = None 

176 if self.other_conf: 

177 self.lvm_conf = self.other_conf.get('lvm-conf') 

178 

179 try: 

180 self.lvmCache = lvmcache.LVMCache(self.vgname, self.lvm_conf) 

181 except: 

182 raise xs_errors.XenError('SRUnavailable', \ 

183 opterr='Failed to initialise the LVMCache') 

184 self.lvActivator = LVActivator(self.uuid, self.lvmCache) 

185 self.journaler = Journaler(self.lvmCache) 

186 if not has_sr_ref: 

187 return # must be a probe call 

188 # Test for thick vs thin provisioning conf parameter 

189 if 'allocation' in self.dconf:

190 if self.dconf['allocation'] in self.PROVISIONING_TYPES: 

191 self.provision = self.dconf['allocation'] 

192 else: 

193 raise xs_errors.XenError('InvalidArg', \ 

194 opterr='Allocation parameter must be one of %s' % self.PROVISIONING_TYPES) 

195 

196 if self.other_conf.get(self.TEST_MODE_KEY):

197 self.testMode = self.other_conf[self.TEST_MODE_KEY] 

198 self._prepareTestMode() 

199 

200 self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref) 

201 # sm_config flag overrides PBD, if any 

202 if self.sm_config.get('allocation') in self.PROVISIONING_TYPES: 

203 self.provision = self.sm_config.get('allocation') 

204 

205 if self.sm_config.get(self.FLAG_USE_VHD) == "true": 

206 self.legacyMode = False 

207 

208 if lvutil._checkVG(self.vgname): 

209 if self.isMaster and not self.cmd in ["vdi_attach", "vdi_detach",

210 "vdi_activate", "vdi_deactivate"]: 

211 self._undoAllJournals() 

212 if not self.cmd in ["sr_attach", "sr_probe"]: 

213 self._checkMetadataVolume() 

214 

215 self.mdexists = False 

216 

217 # get a VDI -> TYPE map from the storage 

218 contains_uuid_regex = \ 

219 re.compile("^.*[0-9a-f]{8}-(([0-9a-f]{4})-){3}[0-9a-f]{12}.*") 

220 self.storageVDIs = {} 

221 

222 for key in self.lvmCache.lvs.keys():

223 # if the lvname has a uuid in it 

224 type = None 

225 vdi = None 

226 if contains_uuid_regex.search(key) is not None: 

227 if key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]): 

228 type = vhdutil.VDI_TYPE_VHD 

229 vdi = key[len(lvhdutil.LV_PREFIX[type]):] 

230 elif key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW]): 

231 type = vhdutil.VDI_TYPE_RAW 

232 vdi = key[len(lvhdutil.LV_PREFIX[type]):] 

233 else: 

234 continue 

235 

236 if type is not None: 

237 self.storageVDIs[vdi] = type 

238 

239 # check if metadata volume exists 

240 try: 

241 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME) 

242 except: 

243 pass 

244 

245 @override 

246 def cleanup(self) -> None: 

247 # we don't need to hold the lock to dec refcounts of activated LVs 

248 if not self.lvActivator.deactivateAll():

249 raise util.SMException("failed to deactivate LVs") 

250 

251 def updateSRMetadata(self, allocation): 
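"""Write the SR-level info and one entry per VDI known to XAPI into the LVM metadata (MGT) volume."""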

252 try: 

253 # Add SR specific SR metadata 

254 sr_info = \ 

255 {ALLOCATION_TAG: allocation, 

256 UUID_TAG: self.uuid, 

257 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_label(self.sr_ref)), 

258 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_description(self.sr_ref)) 

259 } 

260 

261 vdi_info = {} 

262 for vdi in self.session.xenapi.SR.get_VDIs(self.sr_ref): 

263 vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi) 

264 

265 vdi_type = self.session.xenapi.VDI.get_sm_config(vdi).get('vdi_type') 

266 if not vdi_type: 

267 raise xs_errors.XenError('MetadataError', opterr=f"Missing `vdi_type` for VDI {vdi_uuid}") 

268 

269 # Create the VDI entry in the SR metadata 

270 vdi_info[vdi_uuid] = \ 

271 { 

272 UUID_TAG: vdi_uuid, 

273 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi)), 

274 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi)), 

275 IS_A_SNAPSHOT_TAG: \ 

276 int(self.session.xenapi.VDI.get_is_a_snapshot(vdi)), 

277 SNAPSHOT_OF_TAG: \ 

278 self.session.xenapi.VDI.get_snapshot_of(vdi), 

279 SNAPSHOT_TIME_TAG: \ 

280 self.session.xenapi.VDI.get_snapshot_time(vdi), 

281 TYPE_TAG: \ 

282 self.session.xenapi.VDI.get_type(vdi), 

283 VDI_TYPE_TAG: \ 

284 vdi_type, 

285 READ_ONLY_TAG: \ 

286 int(self.session.xenapi.VDI.get_read_only(vdi)), 

287 METADATA_OF_POOL_TAG: \ 

288 self.session.xenapi.VDI.get_metadata_of_pool(vdi), 

289 MANAGED_TAG: \ 

290 int(self.session.xenapi.VDI.get_managed(vdi)) 

291 } 

292 LVMMetadataHandler(self.mdpath).writeMetadata(sr_info, vdi_info) 

293 

294 except Exception as e: 

295 raise xs_errors.XenError('MetadataError', \ 

296 opterr='Error upgrading SR Metadata: %s' % str(e)) 

297 

298 def syncMetadataAndStorage(self): 
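"""Reconcile the MGT metadata with the LVs present on the storage: drop
entries for missing VDIs and fix mismatched VDI types (storage is authoritative)."""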

299 try: 

300 # if a VDI is present in the metadata but not in the storage 

301 # then delete it from the metadata 

302 vdi_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1] 

303 for vdi in list(vdi_info.keys()): 

304 update_map = {} 

305 if not vdi_info[vdi][UUID_TAG] in set(self.storageVDIs.keys()):

306 # delete this from metadata 

307 LVMMetadataHandler(self.mdpath). \ 

308 deleteVdiFromMetadata(vdi_info[vdi][UUID_TAG]) 

309 else: 

310 # search for this in the metadata, compare types 

311 # self.storageVDIs is a map of vdi_uuid to vdi_type 

312 if vdi_info[vdi][VDI_TYPE_TAG] != \ 

313 self.storageVDIs[vdi_info[vdi][UUID_TAG]]: 

314 # storage type takes authority 

315 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] \ 

316 = METADATA_OBJECT_TYPE_VDI 

317 update_map[UUID_TAG] = vdi_info[vdi][UUID_TAG] 

318 update_map[VDI_TYPE_TAG] = \ 

319 self.storageVDIs[vdi_info[vdi][UUID_TAG]] 

320 LVMMetadataHandler(self.mdpath) \ 

321 .updateMetadata(update_map) 

322 else: 

323 # This should never happen 

324 pass 

325 

326 except Exception as e: 

327 raise xs_errors.XenError('MetadataError', \ 

328 opterr='Error synching SR Metadata and storage: %s' % str(e)) 

329 

330 def syncMetadataAndXapi(self): 
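"""Refresh the SR-level fields, then push each VDI's XAPI name_label and name_description into the MGT metadata."""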

331 try: 

332 # get metadata 

333 (sr_info, vdi_info) = \ 

334 LVMMetadataHandler(self.mdpath, False).getMetadata() 

335 

336 # First synch SR parameters 

337 self.update(self.uuid) 

338 

339 # Now update the VDI information in the metadata if required 

340 for vdi_offset in vdi_info.keys(): 

341 try: 

342 vdi_ref = \ 

343 self.session.xenapi.VDI.get_by_uuid( \ 

344 vdi_info[vdi_offset][UUID_TAG]) 

345 except: 

346 # maybe the VDI is not in XAPI yet; don't bother

347 continue 

348 

349 new_name_label = util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi_ref)) 

350 new_name_description = util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi_ref)) 

351 

352 if vdi_info[vdi_offset][NAME_LABEL_TAG] != new_name_label or \ 

353 vdi_info[vdi_offset][NAME_DESCRIPTION_TAG] != \ 

354 new_name_description: 

355 update_map = {} 

356 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \ 

357 METADATA_OBJECT_TYPE_VDI 

358 update_map[UUID_TAG] = vdi_info[vdi_offset][UUID_TAG] 

359 update_map[NAME_LABEL_TAG] = new_name_label 

360 update_map[NAME_DESCRIPTION_TAG] = new_name_description 

361 LVMMetadataHandler(self.mdpath) \ 

362 .updateMetadata(update_map) 

363 except Exception as e: 

364 raise xs_errors.XenError('MetadataError', \ 

365 opterr='Error synching SR Metadata and XAPI: %s' % str(e)) 

366 

367 def _checkMetadataVolume(self): 
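"""Detect the MGT volume; on the master, sync from it on sr_attach or create it if missing and the SR is not in legacy mode."""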

368 util.SMlog("Entering _checkMetadataVolume") 

369 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME) 

370 if self.isMaster:

371 if self.mdexists and self.cmd == "sr_attach": 

372 try: 

373 # activate the management volume 

374 # will be deactivated at detach time 

375 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME) 

376 self._synchSmConfigWithMetaData() 

377 util.SMlog("Sync SR metadata and the state on the storage.") 

378 self.syncMetadataAndStorage() 

379 self.syncMetadataAndXapi() 

380 except Exception as e: 

381 util.SMlog("Exception in _checkMetadataVolume, " \ 

382 "Error: %s." % str(e)) 

383 elif not self.mdexists and not self.legacyMode:

384 self._introduceMetaDataVolume() 

385 

386 if self.mdexists: 

387 self.legacyMode = False 

388 

389 def _synchSmConfigWithMetaData(self): 

390 util.SMlog("Synching sm-config with metadata volume") 

391 

392 try: 

393 # get SR info from metadata 

394 sr_info = {} 

395 map = {} 

396 sr_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[0] 

397 

398 if sr_info == {}:

399 raise Exception("Failed to get SR information from metadata.") 

400 

401 if "allocation" in sr_info: 401 ↛ 405line 401 didn't jump to line 405, because the condition on line 401 was never false

402 self.provision = sr_info.get("allocation") 

403 map['allocation'] = sr_info.get("allocation") 

404 else: 

405 raise Exception("Allocation key not found in SR metadata. " 

406 "SR info found: %s" % sr_info) 

407 

408 except Exception as e: 

409 raise xs_errors.XenError( 

410 'MetadataError', 

411 opterr='Error reading SR params from ' 

412 'metadata Volume: %s' % str(e)) 

413 try: 

414 map[self.FLAG_USE_VHD] = 'true' 

415 self.session.xenapi.SR.set_sm_config(self.sr_ref, map) 

416 except: 

417 raise xs_errors.XenError( 

418 'MetadataError', 

419 opterr='Error updating sm_config key') 

420 

421 def _introduceMetaDataVolume(self): 

422 util.SMlog("Creating Metadata volume") 

423 try: 

424 config = {} 

425 self.lvmCache.create(self.MDVOLUME_NAME, 4 * 1024 * 1024) 

426 

427 # activate the management volume, will be deactivated at detach time 

428 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME) 

429 

430 name_label = util.to_plain_string( \ 

431 self.session.xenapi.SR.get_name_label(self.sr_ref)) 

432 name_description = util.to_plain_string( \ 

433 self.session.xenapi.SR.get_name_description(self.sr_ref)) 

434 config[self.FLAG_USE_VHD] = "true" 

435 config['allocation'] = self.provision 

436 self.session.xenapi.SR.set_sm_config(self.sr_ref, config) 

437 

438 # Add the SR metadata 

439 self.updateSRMetadata(self.provision) 

440 except Exception as e: 

441 raise xs_errors.XenError('MetadataError', \ 

442 opterr='Error introducing Metadata Volume: %s' % str(e)) 

443 

444 def _removeMetadataVolume(self): 

445 if self.mdexists: 

446 try: 

447 self.lvmCache.remove(self.MDVOLUME_NAME) 

448 except: 

449 raise xs_errors.XenError('MetadataError', \ 

450 opterr='Failed to delete MGT Volume') 

451 

452 def _refresh_size(self): 

453 """ 

454 Refreshes the size of the backing device.

455 Returns true if all paths/devices agree on the same size.

456 """ 

457 if hasattr(self, 'SCSIid'):

458 # LVHDoHBASR, LVHDoISCSISR 

459 return scsiutil.refresh_lun_size_by_SCSIid(getattr(self, 'SCSIid')) 

460 else: 

461 # LVHDSR 

462 devices = self.dconf['device'].split(',') 

463 scsiutil.refreshdev(devices) 

464 return True 

465 

466 def _expand_size(self): 

467 """ 

468 Expands the size of the SR by growing into additional available

469 space, if extra space is available on the backing device.

470 Needs to be called after a successful call to _refresh_size.

471 """ 

472 currentvgsize = lvutil._getVGstats(self.vgname)['physical_size'] 

473 # We are comparing PV- with VG-sizes that are aligned. Need a threshold 

474 resizethreshold = 100 * 1024 * 1024 # 100MB 

475 devices = self.dconf['device'].split(',') 

476 totaldevicesize = 0 

477 for device in devices: 

478 totaldevicesize = totaldevicesize + scsiutil.getsize(device) 

479 if totaldevicesize >= (currentvgsize + resizethreshold): 

480 try: 

481 if hasattr(self, 'SCSIid'):

482 # LVHDoHBASR, LVHDoISCSISR might have slaves 

483 scsiutil.refresh_lun_size_by_SCSIid_on_slaves(self.session, 

484 getattr(self, 'SCSIid')) 

485 util.SMlog("LVHDSR._expand_size for %s will resize the pv." % 

486 self.uuid) 

487 for pv in lvutil.get_pv_for_vg(self.vgname): 

488 lvutil.resizePV(pv) 

489 except: 

490 util.logException("LVHDSR._expand_size for %s failed to resize" 

491 " the PV" % self.uuid) 

492 

493 @override 

494 @deviceCheck 

495 def create(self, uuid, size) -> None: 

496 util.SMlog("LVHDSR.create for %s" % self.uuid) 

497 if not self.isMaster: 

498 util.SMlog('sr_create blocked for non-master') 

499 raise xs_errors.XenError('LVMMaster') 

500 

501 if lvutil._checkVG(self.vgname): 

502 raise xs_errors.XenError('SRExists') 

503 

504 # Check none of the devices already in use by other PBDs 

505 if util.test_hostPBD_devs(self.session, uuid, self.dconf['device']): 

506 raise xs_errors.XenError('SRInUse') 

507 

508 # Check serial number entry in SR records 

509 for dev in self.dconf['device'].split(','): 

510 if util.test_scsiserial(self.session, dev): 

511 raise xs_errors.XenError('SRInUse') 

512 

513 lvutil.createVG(self.dconf['device'], self.vgname) 

514 

515 # Update serial number string

516 scsiutil.add_serial_record(self.session, self.sr_ref, \ 

517 scsiutil.devlist_to_serialstring(self.dconf['device'].split(','))) 

518 

519 # since this is an SR.create, turn off legacy mode

520 self.session.xenapi.SR.add_to_sm_config(self.sr_ref, \ 

521 self.FLAG_USE_VHD, 'true') 

522 

523 @override 

524 def delete(self, uuid) -> None: 

525 util.SMlog("LVHDSR.delete for %s" % self.uuid) 

526 if not self.isMaster: 

527 raise xs_errors.XenError('LVMMaster') 

528 cleanup.gc_force(self.session, self.uuid) 

529 

530 success = True 

531 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'): 

532 if util.extractSRFromDevMapper(fileName) != self.uuid: 

533 continue 

534 

535 if util.doesFileHaveOpenHandles(fileName): 

536 util.SMlog("LVHDSR.delete: The dev mapper entry %s has open " \ 

537 "handles" % fileName) 

538 success = False 

539 continue 

540 

541 # Now attempt to remove the dev mapper entry 

542 if not lvutil.removeDevMapperEntry(fileName, False): 

543 success = False 

544 continue 

545 

546 try: 

547 lvname = os.path.basename(fileName.replace('-', '/'). \ 

548 replace('//', '-')) 

549 lpath = os.path.join(self.path, lvname) 

550 os.unlink(lpath) 

551 except OSError as e: 

552 if e.errno != errno.ENOENT: 

553 util.SMlog("LVHDSR.delete: failed to remove the symlink for " \ 

554 "file %s. Error: %s" % (fileName, str(e))) 

555 success = False 

556 

557 if success: 

558 try: 

559 if util.pathexists(self.path): 

560 os.rmdir(self.path) 

561 except Exception as e: 

562 util.SMlog("LVHDSR.delete: failed to remove the symlink " \ 

563 "directory %s. Error: %s" % (self.path, str(e))) 

564 success = False 

565 

566 self._removeMetadataVolume() 

567 self.lvmCache.refresh() 

568 if len(lvhdutil.getLVInfo(self.lvmCache)) > 0: 

569 raise xs_errors.XenError('SRNotEmpty') 

570 

571 if not success: 

572 raise Exception("LVHDSR delete failed, please refer to the log " \ 

573 "for details.") 

574 

575 lvutil.removeVG(self.dconf['device'], self.vgname) 

576 self._cleanup() 

577 

578 @override 

579 def attach(self, uuid) -> None: 

580 util.SMlog("LVHDSR.attach for %s" % self.uuid) 

581 

582 self._cleanup(True) # in case of host crashes, if detach wasn't called 

583 

584 if not util.match_uuid(self.uuid) or not lvutil._checkVG(self.vgname):

585 raise xs_errors.XenError('SRUnavailable', \ 

586 opterr='no such volume group: %s' % self.vgname) 

587 

588 # Refresh the metadata status 

589 self._checkMetadataVolume() 

590 

591 refreshsizeok = self._refresh_size() 

592 

593 if self.isMaster:

594 if refreshsizeok:

595 self._expand_size() 

596 

597 # Update SCSIid string 

598 util.SMlog("Calling devlist_to_serial") 

599 scsiutil.add_serial_record( 

600 self.session, self.sr_ref, 

601 scsiutil.devlist_to_serialstring(self.dconf['device'].split(','))) 

602 

603 # Test Legacy Mode Flag and update if VHD volumes exist 

604 if self.isMaster and self.legacyMode:

605 vdiInfo = lvhdutil.getVDIInfo(self.lvmCache) 

606 for uuid, info in vdiInfo.items(): 

607 if info.vdiType == vhdutil.VDI_TYPE_VHD: 

608 self.legacyMode = False 

609 map = self.session.xenapi.SR.get_sm_config(self.sr_ref) 

610 self._introduceMetaDataVolume() 

611 break 

612 

613 # Set the block scheduler 

614 for dev in self.dconf['device'].split(','): 

615 self.block_setscheduler(dev) 

616 

617 @override 

618 def detach(self, uuid) -> None: 

619 util.SMlog("LVHDSR.detach for %s" % self.uuid) 

620 cleanup.abort(self.uuid) 

621 

622 # Do a best effort cleanup of the dev mapper entries 

623 # go through all devmapper entries for this VG 

624 success = True 

625 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'): 

626 if util.extractSRFromDevMapper(fileName) != self.uuid:

627 continue 

628 

629 with Fairlock('devicemapper'): 

630 # check if any file has open handles 

631 if util.doesFileHaveOpenHandles(fileName): 

632 # if yes, log this and signal failure 

633 util.SMlog( 

634 f"LVHDSR.detach: The dev mapper entry {fileName} has " 

635 "open handles") 

636 success = False 

637 continue 

638 

639 # Now attempt to remove the dev mapper entry 

640 if not lvutil.removeDevMapperEntry(fileName, False):

641 success = False 

642 continue 

643 

644 # also remove the symlinks from /dev/VG-XenStorage-SRUUID/* 

645 try: 

646 lvname = os.path.basename(fileName.replace('-', '/'). \ 

647 replace('//', '-')) 

648 lvname = os.path.join(self.path, lvname) 

649 util.force_unlink(lvname) 

650 except Exception as e: 

651 util.SMlog("LVHDSR.detach: failed to remove the symlink for " \ 

652 "file %s. Error: %s" % (fileName, str(e))) 

653 success = False 

654 

655 # now remove the directory where the symlinks are 

656 # this should pass as the directory should be empty by now 

657 if success: 

658 try: 

659 if util.pathexists(self.path):

660 os.rmdir(self.path) 

661 except Exception as e: 

662 util.SMlog("LVHDSR.detach: failed to remove the symlink " \ 

663 "directory %s. Error: %s" % (self.path, str(e))) 

664 success = False 

665 

666 if not success: 

667 raise Exception("SR detach failed, please refer to the log " \ 

668 "for details.") 

669 

670 # Don't delete lock files on the master as it will break the locking 

671 # between SM and any GC thread that survives through SR.detach. 

672 # However, we should still delete lock files on slaves as it is the 

673 # only place to do so. 

674 self._cleanup(self.isMaster) 

675 

676 @override 

677 def forget_vdi(self, uuid) -> None: 

678 if not self.legacyMode: 

679 LVMMetadataHandler(self.mdpath).deleteVdiFromMetadata(uuid) 

680 super(LVHDSR, self).forget_vdi(uuid) 

681 

682 @override 

683 def scan(self, uuid) -> None: 

684 activated_lvs = set() 

685 try: 

686 util.SMlog("LVHDSR.scan for %s" % self.uuid) 

687 if not self.isMaster:

688 util.SMlog('sr_scan blocked for non-master') 

689 raise xs_errors.XenError('LVMMaster') 

690 

691 if self._refresh_size():

692 self._expand_size() 

693 self.lvmCache.refresh() 

694 cbt_vdis = self.lvmCache.getTagged(CBTLOG_TAG) 

695 self._loadvdis() 

696 stats = lvutil._getVGstats(self.vgname) 

697 self.physical_size = stats['physical_size'] 

698 self.physical_utilisation = stats['physical_utilisation'] 

699 

700 # Now check if there are any VDIs in the metadata, which are not in 

701 # XAPI 

702 if self.mdexists:

703 vdiToSnaps: Dict[str, List[str]] = {} 

704 # get VDIs from XAPI 

705 vdis = self.session.xenapi.SR.get_VDIs(self.sr_ref) 

706 vdi_uuids = set([]) 

707 for vdi in vdis: 

708 vdi_uuids.add(self.session.xenapi.VDI.get_uuid(vdi)) 

709 

710 info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1] 

711 

712 for vdi in list(info.keys()): 

713 vdi_uuid = info[vdi][UUID_TAG] 

714 if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])):

715 if info[vdi][SNAPSHOT_OF_TAG] in vdiToSnaps: 

716 vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]].append(vdi_uuid) 

717 else: 

718 vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]] = [vdi_uuid] 

719 

720 if vdi_uuid not in vdi_uuids: 

721 util.SMlog("Introduce VDI %s as it is present in " \ 

722 "metadata and not in XAPI." % vdi_uuid) 

723 sm_config = {} 

724 sm_config['vdi_type'] = info[vdi][VDI_TYPE_TAG] 

725 lvname = "%s%s" % \ 

726 (lvhdutil.LV_PREFIX[sm_config['vdi_type']], vdi_uuid) 

727 self.lvActivator.activate( 

728 vdi_uuid, lvname, LVActivator.NORMAL) 

729 activated_lvs.add(vdi_uuid) 

730 lvPath = os.path.join(self.path, lvname) 

731 

732 if info[vdi][VDI_TYPE_TAG] == vhdutil.VDI_TYPE_RAW:

733 size = self.lvmCache.getSize( \ 

734 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW] + \ 

735 vdi_uuid) 

736 utilisation = \ 

737 util.roundup(lvutil.LVM_SIZE_INCREMENT, 

738 int(size)) 

739 else: 

740 parent = \ 

741 vhdutil._getVHDParentNoCheck(lvPath) 

742 

743 if parent is not None:

744 sm_config['vhd-parent'] = parent[len( \ 

745 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):] 

746 size = vhdutil.getSizeVirt(lvPath) 

747 if self.provision == "thin":

748 utilisation = \ 

749 util.roundup(lvutil.LVM_SIZE_INCREMENT, 

750 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) 

751 else: 

752 utilisation = lvhdutil.calcSizeVHDLV(int(size)) 

753 

754 vdi_ref = self.session.xenapi.VDI.db_introduce( 

755 vdi_uuid, 

756 info[vdi][NAME_LABEL_TAG], 

757 info[vdi][NAME_DESCRIPTION_TAG], 

758 self.sr_ref, 

759 info[vdi][TYPE_TAG], 

760 False, 

761 bool(int(info[vdi][READ_ONLY_TAG])), 

762 {}, 

763 vdi_uuid, 

764 {}, 

765 sm_config) 

766 

767 self.session.xenapi.VDI.set_managed(vdi_ref, 

768 bool(int(info[vdi][MANAGED_TAG]))) 

769 self.session.xenapi.VDI.set_virtual_size(vdi_ref, 

770 str(size)) 

771 self.session.xenapi.VDI.set_physical_utilisation( \ 

772 vdi_ref, str(utilisation)) 

773 self.session.xenapi.VDI.set_is_a_snapshot( \ 

774 vdi_ref, bool(int(info[vdi][IS_A_SNAPSHOT_TAG]))) 

775 if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])):

776 self.session.xenapi.VDI.set_snapshot_time( \ 

777 vdi_ref, DateTime(info[vdi][SNAPSHOT_TIME_TAG])) 

778 if info[vdi][TYPE_TAG] == 'metadata':

779 self.session.xenapi.VDI.set_metadata_of_pool( \ 

780 vdi_ref, info[vdi][METADATA_OF_POOL_TAG]) 

781 

782 # Update CBT status of disks either just added 

783 # or already in XAPI 

784 cbt_logname = "%s.%s" % (vdi_uuid, CBTLOG_TAG) 

785 if cbt_logname in cbt_vdis:

786 vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid) 

787 self.session.xenapi.VDI.set_cbt_enabled(vdi_ref, True) 

788 # For existing VDIs, update local state too 

789 # Scan in base class SR updates existing VDIs 

790 # again based on local states 

791 if vdi_uuid in self.vdis: 

792 self.vdis[vdi_uuid].cbt_enabled = True 

793 cbt_vdis.remove(cbt_logname) 

794 

795 # Now set the snapshot statuses correctly in XAPI 

796 for srcvdi in vdiToSnaps.keys():

797 try: 

798 srcref = self.session.xenapi.VDI.get_by_uuid(srcvdi) 

799 except: 

800 # the source VDI no longer exists, continue 

801 continue 

802 

803 for snapvdi in vdiToSnaps[srcvdi]: 

804 try: 

805 # this might fail in cases where it's already set

806 snapref = \ 

807 self.session.xenapi.VDI.get_by_uuid(snapvdi) 

808 self.session.xenapi.VDI.set_snapshot_of(snapref, srcref) 

809 except Exception as e: 

810 util.SMlog("Setting snapshot failed. " \ 

811 "Error: %s" % str(e)) 

812 

813 if cbt_vdis:

814 # If we have items remaining in this list, 

815 # they are cbt_metadata VDIs that XAPI doesn't know about

816 # Add them to self.vdis and they'll get added to the DB 

817 for cbt_vdi in cbt_vdis:

818 cbt_uuid = cbt_vdi.split(".")[0] 

819 new_vdi = self.vdi(cbt_uuid) 

820 new_vdi.ty = "cbt_metadata" 

821 new_vdi.cbt_enabled = True 

822 self.vdis[cbt_uuid] = new_vdi 

823 

824 super(LVHDSR, self).scan(uuid) 

825 self._kickGC() 

826 

827 finally: 

828 for vdi in activated_lvs: 

829 self.lvActivator.deactivate( 

830 vdi, LVActivator.NORMAL, False) 

831 

832 @override 

833 def update(self, uuid) -> None: 

834 if not lvutil._checkVG(self.vgname):

835 return 

836 self._updateStats(uuid, 0) 

837 

838 if self.legacyMode:

839 return 

840 

841 # synch name_label in metadata with XAPI 

842 update_map = {} 

843 update_map = {METADATA_UPDATE_OBJECT_TYPE_TAG: \ 

844 METADATA_OBJECT_TYPE_SR, 

845 NAME_LABEL_TAG: util.to_plain_string( \ 

846 self.session.xenapi.SR.get_name_label(self.sr_ref)), 

847 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

848 self.session.xenapi.SR.get_name_description(self.sr_ref)) 

849 } 

850 LVMMetadataHandler(self.mdpath).updateMetadata(update_map) 

851 

852 def _updateStats(self, uuid, virtAllocDelta): 

853 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref)) 

854 self.virtual_allocation = valloc + virtAllocDelta 

855 util.SMlog("Setting virtual_allocation of SR %s to %d" % 

856 (uuid, self.virtual_allocation)) 

857 stats = lvutil._getVGstats(self.vgname) 

858 self.physical_size = stats['physical_size'] 

859 self.physical_utilisation = stats['physical_utilisation'] 

860 self._db_update() 

861 

862 @override 

863 @deviceCheck 

864 def probe(self) -> str: 

865 return lvutil.srlist_toxml( 

866 lvutil.scan_srlist(lvhdutil.VG_PREFIX, self.dconf['device']), 

867 lvhdutil.VG_PREFIX, 

868 ('metadata' in self.srcmd.params['sr_sm_config'] and \ 

869 self.srcmd.params['sr_sm_config']['metadata'] == 'true')) 

870 

871 @override 

872 def vdi(self, uuid) -> VDI.VDI: 

873 return LVHDVDI(self, uuid) 

874 

875 def _loadvdis(self): 
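"""Build self.vdis from an LVM scan, accumulate the virtual allocation,
record parent/child links in 'geneology' and drop hidden leaf nodes."""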

876 self.virtual_allocation = 0 

877 self.vdiInfo = lvhdutil.getVDIInfo(self.lvmCache) 

878 self.allVDIs = {} 

879 

880 for uuid, info in self.vdiInfo.items(): 

881 if uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX):

882 continue 

883 if info.scanError:

884 raise xs_errors.XenError('VDIUnavailable', \ 

885 opterr='Error scanning VDI %s' % uuid) 

886 self.vdis[uuid] = self.allVDIs[uuid] = self.vdi(uuid) 

887 if not self.vdis[uuid].hidden:

888 self.virtual_allocation += self.vdis[uuid].utilisation 

889 

890 for uuid, vdi in self.vdis.items(): 

891 if vdi.parent:

892 if vdi.parent in self.vdis: 

893 self.vdis[vdi.parent].read_only = True 

894 if vdi.parent in geneology: 

895 geneology[vdi.parent].append(uuid) 

896 else: 

897 geneology[vdi.parent] = [uuid] 

898 

899 # Now remove all hidden leaf nodes to avoid introducing records that 

900 # will be GC'ed 

901 for uuid in list(self.vdis.keys()): 

902 if uuid not in geneology and self.vdis[uuid].hidden:

903 util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid) 

904 del self.vdis[uuid] 

905 

906 def _ensureSpaceAvailable(self, amount_needed): 

907 space_available = lvutil._getVGstats(self.vgname)['freespace'] 

908 if (space_available < amount_needed): 

909 util.SMlog("Not enough space! free space: %d, need: %d" % \ 

910 (space_available, amount_needed)) 

911 raise xs_errors.XenError('SRNoSpace') 

912 

913 def _handleInterruptedCloneOps(self): 
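"""Undo or finalize any snapshot/clone operations that still have a journal entry."""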

914 entries = self.journaler.getAll(LVHDVDI.JRN_CLONE) 

915 for uuid, val in entries.items():

916 util.fistpoint.activate("LVHDRT_clone_vdi_before_undo_clone", self.uuid) 

917 self._handleInterruptedCloneOp(uuid, val) 

918 util.fistpoint.activate("LVHDRT_clone_vdi_after_undo_clone", self.uuid) 

919 self.journaler.remove(LVHDVDI.JRN_CLONE, uuid) 

920 

921 def _handleInterruptedCoalesceLeaf(self): 
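"""If a leaf-coalesce journal entry exists, force a GC pass to finish or undo the interrupted operation."""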

922 entries = self.journaler.getAll(cleanup.VDI.JRN_LEAF) 

923 if len(entries) > 0:

924 util.SMlog("*** INTERRUPTED COALESCE-LEAF OP DETECTED ***") 

925 cleanup.gc_force(self.session, self.uuid) 

926 self.lvmCache.refresh() 

927 

928 def _handleInterruptedCloneOp(self, origUuid, jval, forceUndo=False): 

929 """Either roll back or finalize the interrupted snapshot/clone 

930 operation. Rolling back is unsafe if the leaf VHDs have already been 

931 in use and written to. However, it is always safe to roll back while 

932 we're still in the context of the failed snapshot operation since the 

933 VBD is paused for the duration of the operation""" 

934 util.SMlog("*** INTERRUPTED CLONE OP: for %s (%s)" % (origUuid, jval)) 

935 lvs = lvhdutil.getLVInfo(self.lvmCache) 

936 baseUuid, clonUuid = jval.split("_") 

937 

938 # is there a "base copy" VDI? 

939 if not lvs.get(baseUuid): 

940 # no base copy: make sure the original is there 

941 if lvs.get(origUuid): 

942 util.SMlog("*** INTERRUPTED CLONE OP: nothing to do") 

943 return 

944 raise util.SMException("base copy %s not present, " \ 

945 "but no original %s found" % (baseUuid, origUuid)) 

946 

947 if forceUndo: 

948 util.SMlog("Explicit revert") 

949 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

950 return 

951 

952 if not lvs.get(origUuid) or (clonUuid and not lvs.get(clonUuid)): 

953 util.SMlog("One or both leaves missing => revert") 

954 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

955 return 

956 

957 vdis = lvhdutil.getVDIInfo(self.lvmCache) 

958 if vdis[origUuid].scanError or (clonUuid and vdis[clonUuid].scanError): 

959 util.SMlog("One or both leaves invalid => revert") 

960 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

961 return 

962 

963 orig = vdis[origUuid] 

964 base = vdis[baseUuid] 

965 self.lvActivator.activate(baseUuid, base.lvName, False) 

966 self.lvActivator.activate(origUuid, orig.lvName, False) 

967 if orig.parentUuid != baseUuid: 

968 parent = vdis[orig.parentUuid] 

969 self.lvActivator.activate(parent.uuid, parent.lvName, False) 

970 origPath = os.path.join(self.path, orig.lvName) 

971 if not vhdutil.check(origPath): 

972 util.SMlog("Orig VHD invalid => revert") 

973 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

974 return 

975 

976 if clonUuid: 

977 clon = vdis[clonUuid] 

978 clonPath = os.path.join(self.path, clon.lvName) 

979 self.lvActivator.activate(clonUuid, clon.lvName, False) 

980 if not vhdutil.check(clonPath): 

981 util.SMlog("Clon VHD invalid => revert") 

982 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) 

983 return 

984 

985 util.SMlog("Snapshot appears valid, will not roll back") 

986 self._completeCloneOp(vdis, origUuid, baseUuid, clonUuid) 

987 

988 def _undoCloneOp(self, lvs, origUuid, baseUuid, clonUuid): 
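"""Roll back an interrupted clone: un-hide the base copy, remove the leaf
LVs, re-inflate the base and rename it back to the original VDI."""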

989 base = lvs[baseUuid] 

990 basePath = os.path.join(self.path, base.name) 

991 

992 # make the parent RW 

993 if base.readonly: 

994 self.lvmCache.setReadonly(base.name, False) 

995 

996 ns = lvhdutil.NS_PREFIX_LVM + self.uuid 

997 origRefcountBinary = RefCounter.check(origUuid, ns)[1] 

998 origRefcountNormal = 0 

999 

1000 # un-hide the parent 

1001 if base.vdiType == vhdutil.VDI_TYPE_VHD: 

1002 self.lvActivator.activate(baseUuid, base.name, False) 

1003 origRefcountNormal = 1 

1004 vhdInfo = vhdutil.getVHDInfo(basePath, lvhdutil.extractUuid, False) 

1005 if vhdInfo.hidden: 

1006 vhdutil.setHidden(basePath, False) 

1007 elif base.vdiType == vhdutil.VDI_TYPE_RAW and base.hidden: 

1008 self.lvmCache.setHidden(base.name, False) 

1009 

1010 # remove the child nodes 

1011 if clonUuid and lvs.get(clonUuid): 

1012 if lvs[clonUuid].vdiType != vhdutil.VDI_TYPE_VHD: 

1013 raise util.SMException("clone %s not VHD" % clonUuid) 

1014 self.lvmCache.remove(lvs[clonUuid].name) 

1015 if self.lvActivator.get(clonUuid, False): 

1016 self.lvActivator.remove(clonUuid, False) 

1017 if lvs.get(origUuid): 

1018 self.lvmCache.remove(lvs[origUuid].name) 

1019 

1020 # inflate the parent to fully-allocated size 

1021 if base.vdiType == vhdutil.VDI_TYPE_VHD: 

1022 fullSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt) 

1023 lvhdutil.inflate(self.journaler, self.uuid, baseUuid, fullSize) 

1024 

1025 # rename back 

1026 origLV = lvhdutil.LV_PREFIX[base.vdiType] + origUuid 

1027 self.lvmCache.rename(base.name, origLV) 

1028 RefCounter.reset(baseUuid, ns) 

1029 if self.lvActivator.get(baseUuid, False): 

1030 self.lvActivator.replace(baseUuid, origUuid, origLV, False) 

1031 RefCounter.set(origUuid, origRefcountNormal, origRefcountBinary, ns) 

1032 

1033 # At this stage, tapdisk and SM vdi will be in paused state. Remove 

1034 # flag to facilitate vm deactivate 

1035 origVdiRef = self.session.xenapi.VDI.get_by_uuid(origUuid) 

1036 self.session.xenapi.VDI.remove_from_sm_config(origVdiRef, 'paused') 

1037 

1038 # update LVM metadata on slaves 

1039 slaves = util.get_slaves_attached_on(self.session, [origUuid]) 

1040 lvhdutil.lvRefreshOnSlaves(self.session, self.uuid, self.vgname, 

1041 origLV, origUuid, slaves) 

1042 

1043 util.SMlog("*** INTERRUPTED CLONE OP: rollback success") 

1044 

1045 def _completeCloneOp(self, vdis, origUuid, baseUuid, clonUuid): 

1046 """Finalize the interrupted snapshot/clone operation. This must not be 

1047 called from the live snapshot op context because we attempt to pause/ 

1048 unpause the VBD here (the VBD is already paused during snapshot, so it 

1049 would cause a deadlock)""" 

1050 base = vdis[baseUuid] 

1051 clon = None 

1052 if clonUuid: 

1053 clon = vdis[clonUuid] 

1054 

1055 cleanup.abort(self.uuid) 

1056 

1057 # make sure the parent is hidden and read-only 

1058 if not base.hidden: 

1059 if base.vdiType == vhdutil.VDI_TYPE_RAW: 

1060 self.lvmCache.setHidden(base.lvName) 

1061 else: 

1062 basePath = os.path.join(self.path, base.lvName) 

1063 vhdutil.setHidden(basePath) 

1064 if not base.lvReadonly: 

1065 self.lvmCache.setReadonly(base.lvName, True) 

1066 

1067 # NB: since this snapshot-preserving call is only invoked outside the 

1068 # snapshot op context, we assume the LVM metadata on the involved slave 

1069 # has by now been refreshed and do not attempt to do it here 

1070 

1071 # Update the original record 

1072 try: 

1073 vdi_ref = self.session.xenapi.VDI.get_by_uuid(origUuid) 

1074 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

1075 type = self.session.xenapi.VDI.get_type(vdi_ref) 

1076 sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD 

1077 sm_config['vhd-parent'] = baseUuid 

1078 self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config) 

1079 except XenAPI.Failure: 

1080 util.SMlog("ERROR updating the orig record") 

1081 

1082 # introduce the new VDI records 

1083 if clonUuid: 

1084 try: 

1085 clon_vdi = VDI.VDI(self, clonUuid) 

1086 clon_vdi.read_only = False 

1087 clon_vdi.location = clonUuid 

1088 clon_vdi.utilisation = clon.sizeLV 

1089 clon_vdi.sm_config = { 

1090 "vdi_type": vhdutil.VDI_TYPE_VHD, 

1091 "vhd-parent": baseUuid} 

1092 

1093 if not self.legacyMode: 

1094 LVMMetadataHandler(self.mdpath). \ 

1095 ensureSpaceIsAvailableForVdis(1) 

1096 

1097 clon_vdi_ref = clon_vdi._db_introduce() 

1098 util.SMlog("introduced clon VDI: %s (%s)" % \ 

1099 (clon_vdi_ref, clonUuid)) 

1100 

1101 vdi_info = {UUID_TAG: clonUuid, 

1102 NAME_LABEL_TAG: clon_vdi.label, 

1103 NAME_DESCRIPTION_TAG: clon_vdi.description, 

1104 IS_A_SNAPSHOT_TAG: 0, 

1105 SNAPSHOT_OF_TAG: '', 

1106 SNAPSHOT_TIME_TAG: '', 

1107 TYPE_TAG: type, 

1108 VDI_TYPE_TAG: clon_vdi.sm_config['vdi_type'], 

1109 READ_ONLY_TAG: int(clon_vdi.read_only), 

1110 MANAGED_TAG: int(clon_vdi.managed), 

1111 METADATA_OF_POOL_TAG: '' 

1112 } 

1113 

1114 if not self.legacyMode: 

1115 LVMMetadataHandler(self.mdpath).addVdi(vdi_info) 

1116 

1117 except XenAPI.Failure: 

1118 util.SMlog("ERROR introducing the clon record") 

1119 

1120 try: 

1121 base_vdi = VDI.VDI(self, baseUuid) # readonly parent 

1122 base_vdi.label = "base copy" 

1123 base_vdi.read_only = True 

1124 base_vdi.location = baseUuid 

1125 base_vdi.size = base.sizeVirt 

1126 base_vdi.utilisation = base.sizeLV 

1127 base_vdi.managed = False 

1128 base_vdi.sm_config = { 

1129 "vdi_type": vhdutil.VDI_TYPE_VHD, 

1130 "vhd-parent": baseUuid} 

1131 

1132 if not self.legacyMode: 

1133 LVMMetadataHandler(self.mdpath).ensureSpaceIsAvailableForVdis(1) 

1134 

1135 base_vdi_ref = base_vdi._db_introduce() 

1136 util.SMlog("introduced base VDI: %s (%s)" % \ 

1137 (base_vdi_ref, baseUuid)) 

1138 

1139 vdi_info = {UUID_TAG: baseUuid, 

1140 NAME_LABEL_TAG: base_vdi.label, 

1141 NAME_DESCRIPTION_TAG: base_vdi.description, 

1142 IS_A_SNAPSHOT_TAG: 0, 

1143 SNAPSHOT_OF_TAG: '', 

1144 SNAPSHOT_TIME_TAG: '', 

1145 TYPE_TAG: type, 

1146 VDI_TYPE_TAG: base_vdi.sm_config['vdi_type'], 

1147 READ_ONLY_TAG: int(base_vdi.read_only), 

1148 MANAGED_TAG: int(base_vdi.managed), 

1149 METADATA_OF_POOL_TAG: '' 

1150 } 

1151 

1152 if not self.legacyMode: 

1153 LVMMetadataHandler(self.mdpath).addVdi(vdi_info) 

1154 except XenAPI.Failure: 

1155 util.SMlog("ERROR introducing the base record") 

1156 

1157 util.SMlog("*** INTERRUPTED CLONE OP: complete") 

1158 

1159 def _undoAllJournals(self): 

1160 """Undo all VHD & SM interrupted journaled operations. This call must 

1161 be serialized with respect to all operations that create journals""" 

1162 # undoing interrupted inflates must be done first, since undoing VHD 

1163 # ops might require inflations 

1164 self.lock.acquire() 

1165 try: 

1166 self._undoAllInflateJournals() 

1167 self._undoAllVHDJournals() 

1168 self._handleInterruptedCloneOps() 

1169 self._handleInterruptedCoalesceLeaf() 

1170 finally: 

1171 self.lock.release() 

1172 self.cleanup() 

1173 

1174 def _undoAllInflateJournals(self): 
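"""Deflate any VDIs whose inflate operation was interrupted, as recorded in the inflate journal."""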

1175 entries = self.journaler.getAll(lvhdutil.JRN_INFLATE) 

1176 if len(entries) == 0: 

1177 return 

1178 self._loadvdis() 

1179 for uuid, val in entries.items(): 

1180 vdi = self.vdis.get(uuid) 

1181 if vdi:

1182 util.SMlog("Found inflate journal %s, deflating %s to %s" % \ 

1183 (uuid, vdi.path, val)) 

1184 if vdi.readonly:

1185 self.lvmCache.setReadonly(vdi.lvname, False) 

1186 self.lvActivator.activate(uuid, vdi.lvname, False) 

1187 currSizeLV = self.lvmCache.getSize(vdi.lvname) 

1188 util.zeroOut(vdi.path, currSizeLV - vhdutil.VHD_FOOTER_SIZE, 

1189 vhdutil.VHD_FOOTER_SIZE) 

1190 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(val)) 

1191 if vdi.readonly:

1192 self.lvmCache.setReadonly(vdi.lvname, True) 

1193 if "true" == self.session.xenapi.SR.get_shared(self.sr_ref): 1193 ↛ 1194line 1193 didn't jump to line 1194, because the condition on line 1193 was never true

1194 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid, 

1195 self.vgname, vdi.lvname, uuid) 

1196 self.journaler.remove(lvhdutil.JRN_INFLATE, uuid) 

1197 delattr(self, "vdiInfo") 

1198 delattr(self, "allVDIs") 

1199 

1200 def _undoAllVHDJournals(self): 

1201 """check if there are VHD journals in existence and revert them""" 

1202 journals = lvhdutil.getAllVHDJournals(self.lvmCache) 

1203 if len(journals) == 0:

1204 return 

1205 self._loadvdis() 

1206 for uuid, jlvName in journals: 

1207 vdi = self.vdis[uuid] 

1208 util.SMlog("Found VHD journal %s, reverting %s" % (uuid, vdi.path)) 

1209 self.lvActivator.activate(uuid, vdi.lvname, False) 

1210 self.lvmCache.activateNoRefcount(jlvName) 

1211 fullSize = lvhdutil.calcSizeVHDLV(vdi.size) 

1212 lvhdutil.inflate(self.journaler, self.uuid, vdi.uuid, fullSize) 

1213 try: 

1214 jFile = os.path.join(self.path, jlvName) 

1215 vhdutil.revert(vdi.path, jFile) 

1216 except util.CommandException: 

1217 util.logException("VHD journal revert") 

1218 vhdutil.check(vdi.path) 

1219 util.SMlog("VHD revert failed but VHD ok: removing journal") 

1220 # Attempt to reclaim unused space 

1221 vhdInfo = vhdutil.getVHDInfo(vdi.path, lvhdutil.extractUuid, False) 

1222 NewSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt) 

1223 if NewSize < fullSize: 

1224 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(NewSize)) 

1225 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid, 

1226 self.vgname, vdi.lvname, uuid) 

1227 self.lvmCache.remove(jlvName) 

1228 delattr(self, "vdiInfo") 

1229 delattr(self, "allVDIs") 

1230 

1231 def call_on_slave(self, args, host_refs, message: str): 
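"""Invoke the on-slave plugin's "multi" call with the given args on every host in host_refs except the master."""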

1232 master_ref = util.get_this_host_ref(self.session) 

1233 for hostRef in host_refs: 

1234 if hostRef == master_ref:

1235 continue 

1236 util.SMlog(f"{message} on slave {hostRef}") 

1237 rv = self.session.xenapi.host.call_plugin( 

1238 hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1239 util.SMlog("call-plugin returned: %s" % rv) 

1240 if not rv:

1241 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1242 

1243 def _updateSlavesPreClone(self, hostRefs, origOldLV): 

1244 args = {"vgName": self.vgname, 

1245 "action1": "deactivateNoRefcount", 

1246 "lvName1": origOldLV} 

1247 message = "Deactivate VDI" 

1248 self.call_on_slave(args, hostRefs, message) 

1249 

1250 def _updateSlavesOnClone(self, hostRefs, origOldLV, origLV, 

1251 baseUuid, baseLV): 

1252 """We need to reactivate the original LV on each slave (note that the 

1253 name for the original LV might change), as well as init the refcount 

1254 for the base LV""" 

1255 args = {"vgName": self.vgname, 

1256 "action1": "refresh", 

1257 "lvName1": origLV, 

1258 "action2": "activate", 

1259 "ns2": lvhdutil.NS_PREFIX_LVM + self.uuid, 

1260 "lvName2": baseLV, 

1261 "uuid2": baseUuid} 

1262 

1263 message = f"Updating {origOldLV}, {origLV}, {baseLV}" 

1264 self.call_on_slave(args, hostRefs, message) 

1265 

1266 def _updateSlavesOnCBTClone(self, hostRefs, cbtlog): 

1267 """Reactivate and refresh CBT log file on slaves""" 

1268 args = {"vgName": self.vgname, 

1269 "action1": "deactivateNoRefcount", 

1270 "lvName1": cbtlog, 

1271 "action2": "refresh", 

1272 "lvName2": cbtlog} 

1273 

1274 message = f"Updating {cbtlog}" 

1275 self.call_on_slave(args, hostRefs, message) 

1276 

1277 def _updateSlavesOnRemove(self, hostRefs, baseUuid, baseLV): 

1278 """Tell the slave we deleted the base image""" 

1279 args = {"vgName": self.vgname, 

1280 "action1": "cleanupLockAndRefcount", 

1281 "uuid1": baseUuid, 

1282 "ns1": lvhdutil.NS_PREFIX_LVM + self.uuid} 

1283 

1284 message = f"Cleaning locks for {baseLV}" 

1285 self.call_on_slave(args, hostRefs, message) 

1286 

1287 def _cleanup(self, skipLockCleanup=False): 

1288 """delete stale refcounter, flag, and lock files""" 

1289 RefCounter.resetAll(lvhdutil.NS_PREFIX_LVM + self.uuid) 

1290 IPCFlag(self.uuid).clearAll() 

1291 if not skipLockCleanup:

1292 Lock.cleanupAll(self.uuid) 

1293 Lock.cleanupAll(lvhdutil.NS_PREFIX_LVM + self.uuid) 

1294 

1295 def _prepareTestMode(self): 

1296 util.SMlog("Test mode: %s" % self.testMode) 

1297 if self.ENV_VAR_VHD_TEST.get(self.testMode):

1298 os.environ[self.ENV_VAR_VHD_TEST[self.testMode]] = "yes" 

1299 util.SMlog("Setting env %s" % self.ENV_VAR_VHD_TEST[self.testMode]) 

1300 

1301 def _kickGC(self): 

1302 util.SMlog("Kicking GC") 

1303 cleanup.start_gc_service(self.uuid) 

1304 

1305 def ensureCBTSpace(self): 

1306 # Ensure we have space for at least one LV 

1307 self._ensureSpaceAvailable(self.journaler.LV_SIZE) 

1308 

1309 

1310class LVHDVDI(VDI.VDI): 

1311 

1312 JRN_CLONE = "clone" # journal entry type for the clone operation 

1313 

1314 @override 

1315 def load(self, vdi_uuid) -> None: 
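"""Initialise the VDI, using SR-level scan results when available, otherwise probing the LV type directly."""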

1316 self.lock = self.sr.lock 

1317 self.lvActivator = self.sr.lvActivator 

1318 self.loaded = False 

1319 self.vdi_type = vhdutil.VDI_TYPE_VHD 

1320 if self.sr.legacyMode or util.fistpoint.is_active("xenrt_default_vdi_type_legacy"):

1321 self.vdi_type = vhdutil.VDI_TYPE_RAW 

1322 self.uuid = vdi_uuid 

1323 self.location = self.uuid 

1324 self.exists = True 

1325 

1326 if hasattr(self.sr, "vdiInfo") and self.sr.vdiInfo.get(self.uuid): 

1327 self._initFromVDIInfo(self.sr.vdiInfo[self.uuid]) 

1328 if self.parent:

1329 self.sm_config_override['vhd-parent'] = self.parent 

1330 else: 

1331 self.sm_config_override['vhd-parent'] = None 

1332 return 

1333 

1334 # scan() didn't run: determine the type of the VDI manually 

1335 if self._determineType(): 

1336 return 

1337 

1338 # the VDI must be in the process of being created 

1339 self.exists = False 

1340 if "vdi_sm_config" in self.sr.srcmd.params and \ 1340 ↛ 1342line 1340 didn't jump to line 1342, because the condition on line 1340 was never true

1341 "type" in self.sr.srcmd.params["vdi_sm_config"]: 

1342 type = self.sr.srcmd.params["vdi_sm_config"]["type"] 

1343 if type == PARAM_RAW: 

1344 self.vdi_type = vhdutil.VDI_TYPE_RAW 

1345 elif type == PARAM_VHD: 

1346 self.vdi_type = vhdutil.VDI_TYPE_VHD 

1347 if self.sr.cmd == 'vdi_create' and self.sr.legacyMode: 

1348 raise xs_errors.XenError('VDICreate', \ 

1349 opterr='Cannot create VHD type disk in legacy mode') 

1350 else: 

1351 raise xs_errors.XenError('VDICreate', opterr='bad type') 

1352 self.lvname = "%s%s" % (lvhdutil.LV_PREFIX[self.vdi_type], vdi_uuid) 

1353 self.path = os.path.join(self.sr.path, self.lvname) 

1354 

1355 @override 

1356 def create(self, sr_uuid, vdi_uuid, size) -> str: 

1357 util.SMlog("LVHDVDI.create for %s" % self.uuid) 

1358 if not self.sr.isMaster: 

1359 raise xs_errors.XenError('LVMMaster') 

1360 if self.exists: 

1361 raise xs_errors.XenError('VDIExists') 

1362 

1363 size = vhdutil.validate_and_round_vhd_size(int(size)) 

1364 

1365 util.SMlog("LVHDVDI.create: type = %s, %s (size=%s)" % \ 

1366 (self.vdi_type, self.path, size)) 

1367 lvSize = 0 

1368 self.sm_config = self.sr.srcmd.params["vdi_sm_config"] 

1369 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1370 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, int(size)) 

1371 else: 

1372 if self.sr.provision == "thin": 

1373 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, 

1374 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) 

1375 elif self.sr.provision == "thick": 

1376 lvSize = lvhdutil.calcSizeVHDLV(int(size)) 

1377 

1378 self.sr._ensureSpaceAvailable(lvSize) 

1379 

1380 try: 

1381 self.sr.lvmCache.create(self.lvname, lvSize) 

1382 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1383 self.size = self.sr.lvmCache.getSize(self.lvname) 

1384 else: 

1385 vhdutil.create(self.path, int(size), False, lvhdutil.MSIZE_MB) 

1386 self.size = vhdutil.getSizeVirt(self.path) 

1387 self.sr.lvmCache.deactivateNoRefcount(self.lvname) 

1388 except util.CommandException as e: 

1389 util.SMlog("Unable to create VDI") 

1390 self.sr.lvmCache.remove(self.lvname) 

1391 raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code) 

1392 

1393 self.utilisation = lvSize 

1394 self.sm_config["vdi_type"] = self.vdi_type 

1395 

1396 if not self.sr.legacyMode: 

1397 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1398 

1399 self.ref = self._db_introduce() 

1400 self.sr._updateStats(self.sr.uuid, self.size) 

1401 

1402 vdi_info = {UUID_TAG: self.uuid, 

1403 NAME_LABEL_TAG: util.to_plain_string(self.label), 

1404 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description), 

1405 IS_A_SNAPSHOT_TAG: 0, 

1406 SNAPSHOT_OF_TAG: '', 

1407 SNAPSHOT_TIME_TAG: '', 

1408 TYPE_TAG: self.ty, 

1409 VDI_TYPE_TAG: self.vdi_type, 

1410 READ_ONLY_TAG: int(self.read_only), 

1411 MANAGED_TAG: int(self.managed), 

1412 METADATA_OF_POOL_TAG: '' 

1413 } 

1414 

1415 if not self.sr.legacyMode: 

1416 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1417 

1418 return VDI.VDI.get_params(self) 

1419 

1420 @override 

1421 def delete(self, sr_uuid, vdi_uuid, data_only=False) -> None: 

1422 util.SMlog("LVHDVDI.delete for %s" % self.uuid) 

1423 try: 

1424 self._loadThis() 

1425 except xs_errors.SRException as e: 

1426 # Catch 'VDI doesn't exist' exception 

1427 if e.errno == 46: 

1428 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

1429 raise 

1430 

1431 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1432 if not self.session.xenapi.VDI.get_managed(vdi_ref): 

1433 raise xs_errors.XenError("VDIDelete", \ 

1434 opterr="Deleting non-leaf node not permitted") 

1435 

1436 if not self.hidden: 

1437 self._markHidden() 

1438 

1439 if not data_only: 

1440 # Remove from XAPI and delete from MGT 

1441 self._db_forget() 

1442 else: 

1443 # If this is a data_destroy call, don't remove from XAPI db 

1444 # Only delete from MGT 

1445 if not self.sr.legacyMode: 

1446 LVMMetadataHandler(self.sr.mdpath).deleteVdiFromMetadata(self.uuid) 

1447 

1448 # deactivate here because it might be too late to do it in the "final" 

1449 # step: GC might have removed the LV by then 

1450 if self.sr.lvActivator.get(self.uuid, False): 

1451 self.sr.lvActivator.deactivate(self.uuid, False) 

1452 

1453 try: 

1454 self.sr.lvmCache.remove(self.lvname) 

1455 self.sr.lock.cleanup(vdi_uuid, lvhdutil.NS_PREFIX_LVM + sr_uuid) 

1456 self.sr.lock.cleanupAll(vdi_uuid) 

1457 except xs_errors.SRException as e: 

1458 util.SMlog( 

1459 "Failed to remove the volume (maybe is leaf coalescing) " 

1460 "for %s err:%d" % (self.uuid, e.errno)) 

1461 

1462 self.sr._updateStats(self.sr.uuid, -self.size) 

1463 self.sr._kickGC() 

1464 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

1465 

1466 @override 

1467 def attach(self, sr_uuid, vdi_uuid) -> str: 

1468 util.SMlog("LVHDVDI.attach for %s" % self.uuid) 

1469 if self.sr.journaler.hasJournals(self.uuid): 

1470 raise xs_errors.XenError('VDIUnavailable', 

1471 opterr='Interrupted operation detected on this VDI, ' 

1472 'scan SR first to trigger auto-repair') 

1473 

1474 writable = ('args' not in self.sr.srcmd.params) or \ 

1475 (self.sr.srcmd.params['args'][0] == "true") 

1476 needInflate = True 

1477 if self.vdi_type == vhdutil.VDI_TYPE_RAW or not writable: 

1478 needInflate = False 

1479 else: 

1480 self._loadThis() 

1481 if self.utilisation >= lvhdutil.calcSizeVHDLV(self.size): 

1482 needInflate = False 

1483 

1484 if needInflate: 

1485 try: 

1486 self._prepareThin(True) 

1487 except: 

1488 util.logException("attach") 

1489 raise xs_errors.XenError('LVMProvisionAttach') 

1490 

1491 try: 

1492 return self._attach() 

1493 finally: 

1494 if not self.sr.lvActivator.deactivateAll(): 

1495 util.SMlog("Failed to deactivate LVs back (%s)" % self.uuid) 

1496 

1497 @override 

1498 def detach(self, sr_uuid, vdi_uuid) -> None: 

1499 util.SMlog("LVHDVDI.detach for %s" % self.uuid) 

1500 self._loadThis() 

1501 already_deflated = (self.utilisation < \ 

1502 lvhdutil.calcSizeVHDLV(self.size)) 

1503 needDeflate = True 

1504 if self.vdi_type == vhdutil.VDI_TYPE_RAW or already_deflated: 

1505 needDeflate = False 

1506 elif self.sr.provision == "thick": 

1507 needDeflate = False 

1508 # except for snapshots, which are always deflated 

1509 if self.sr.srcmd.cmd != 'vdi_detach_from_config': 

1510 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1511 snap = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref) 

1512 if snap: 

1513 needDeflate = True 
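# Net effect of the checks above: raw VDIs and already-deflated VHDs are left
# untouched on detach, thick-provisioned leaves stay fully inflated, and only
# snapshots (plus thin-provisioned leaves) are deflated back to their minimal
# physical size.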

1514 

1515 if needDeflate: 

1516 try: 

1517 self._prepareThin(False) 

1518 except: 

1519 util.logException("_prepareThin") 

1520 raise xs_errors.XenError('VDIUnavailable', opterr='deflate') 

1521 

1522 try: 

1523 self._detach() 

1524 finally: 

1525 if not self.sr.lvActivator.deactivateAll(): 

1526 raise xs_errors.XenError("SMGeneral", opterr="deactivation") 

1527 

1528 # We only support offline resize 

1529 @override 

1530 def resize(self, sr_uuid, vdi_uuid, size) -> str: 

1531 util.SMlog("LVHDVDI.resize for %s" % self.uuid) 

1532 if not self.sr.isMaster: 

1533 raise xs_errors.XenError('LVMMaster') 

1534 

1535 self._loadThis() 

1536 if self.hidden: 

1537 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI') 

1538 

1539 if size < self.size: 

1540 util.SMlog('vdi_resize: shrinking not supported: ' + \ 

1541 '(current size: %d, new size: %d)' % (self.size, size)) 

1542 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed') 

1543 

1544 size = vhdutil.validate_and_round_vhd_size(int(size)) 

1545 

1546 if size == self.size: 

1547 return VDI.VDI.get_params(self) 

1548 

1549 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1550 lvSizeOld = self.size 

1551 lvSizeNew = util.roundup(lvutil.LVM_SIZE_INCREMENT, size) 

1552 else: 

1553 lvSizeOld = self.utilisation 

1554 lvSizeNew = lvhdutil.calcSizeVHDLV(size) 

1555 if self.sr.provision == "thin": 

1556 # VDI is currently deflated, so keep it deflated 

1557 lvSizeNew = lvSizeOld 

1558 assert(lvSizeNew >= lvSizeOld) 

1559 spaceNeeded = lvSizeNew - lvSizeOld 

1560 self.sr._ensureSpaceAvailable(spaceNeeded) 
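# Space check sketch: only the difference between the new and the old LV size
# has to be free in the VG.  In the thin-provisioned VHD case lvSizeNew is
# pinned to lvSizeOld above, so spaceNeeded is 0 and the resize is purely a
# virtual-size update in the VHD header (the LV is inflated later on attach).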

1561 

1562 oldSize = self.size 

1563 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

1564 self.sr.lvmCache.setSize(self.lvname, lvSizeNew) 

1565 self.size = self.sr.lvmCache.getSize(self.lvname) 

1566 self.utilisation = self.size 

1567 else: 

1568 if lvSizeNew != lvSizeOld: 

1569 lvhdutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid, 

1570 lvSizeNew) 

1571 vhdutil.setSizeVirtFast(self.path, size) 

1572 self.size = vhdutil.getSizeVirt(self.path) 

1573 self.utilisation = self.sr.lvmCache.getSize(self.lvname) 

1574 

1575 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1576 self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size)) 

1577 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, 

1578 str(self.utilisation)) 

1579 self.sr._updateStats(self.sr.uuid, self.size - oldSize) 

1580 super(LVHDVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size) 

1581 return VDI.VDI.get_params(self) 

1582 

1583 @override 

1584 def clone(self, sr_uuid, vdi_uuid) -> str: 

1585 return self._do_snapshot( 

1586 sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE, cloneOp=True) 

1587 

1588 @override 

1589 def compose(self, sr_uuid, vdi1, vdi2) -> None: 

1590 util.SMlog("LVHDSR.compose for %s -> %s" % (vdi2, vdi1)) 

1591 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

1592 raise xs_errors.XenError('Unimplemented') 

1593 

1594 parent_uuid = vdi1 

1595 parent_lvname = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + parent_uuid 

1596 assert(self.sr.lvmCache.checkLV(parent_lvname)) 

1597 parent_path = os.path.join(self.sr.path, parent_lvname) 

1598 

1599 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

1600 self.sr.lvActivator.activate(parent_uuid, parent_lvname, False) 

1601 

1602 vhdutil.setParent(self.path, parent_path, False) 

1603 vhdutil.setHidden(parent_path) 

1604 self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False) 

1605 

1606 if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid, 

1607 True): 

1608 raise util.SMException("failed to refresh VDI %s" % self.uuid) 

1609 

1610 util.SMlog("Compose done") 

1611 

1612 def reset_leaf(self, sr_uuid, vdi_uuid): 

1613 util.SMlog("LVHDSR.reset_leaf for %s" % vdi_uuid) 

1614 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

1615 raise xs_errors.XenError('Unimplemented') 

1616 

1617 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

1618 

1619 # safety check 

1620 if not vhdutil.hasParent(self.path): 

1621 raise util.SMException("ERROR: VDI %s has no parent, " \ 

1622 "will not reset contents" % self.uuid) 

1623 

1624 vhdutil.killData(self.path) 

1625 

1626 def _attach(self): 

1627 self._chainSetActive(True, True, True) 

1628 if not util.pathexists(self.path): 

1629 raise xs_errors.XenError('VDIUnavailable', \ 

1630 opterr='Could not find: %s' % self.path) 

1631 

1632 if not hasattr(self, 'xenstore_data'): 

1633 self.xenstore_data = {} 

1634 

1635 self.xenstore_data.update(scsiutil.update_XS_SCSIdata(self.uuid, \ 

1636 scsiutil.gen_synthetic_page_data(self.uuid))) 

1637 

1638 self.xenstore_data['storage-type'] = 'lvm' 

1639 self.xenstore_data['vdi-type'] = self.vdi_type 

1640 

1641 self.attached = True 

1642 self.sr.lvActivator.persist() 

1643 return VDI.VDI.attach(self, self.sr.uuid, self.uuid) 

1644 

1645 def _detach(self): 

1646 self._chainSetActive(False, True) 

1647 self.attached = False 

1648 

1649 @override 

1650 def _do_snapshot(self, sr_uuid, vdi_uuid, snapType, 

1651 cloneOp=False, secondary=None, cbtlog=None) -> str: 

1652 # If cbt enabled, save file consistency state 

1653 if cbtlog is not None: 

1654 if blktap2.VDI.tap_status(self.session, vdi_uuid): 1654 ↛ 1655 line 1654 didn't jump to line 1655, because the condition on line 1654 was never true

1655 consistency_state = False 

1656 else: 

1657 consistency_state = True 

1658 util.SMlog("Saving log consistency state of %s for vdi: %s" % 

1659 (consistency_state, vdi_uuid)) 

1660 else: 

1661 consistency_state = None 

1662 

1663 pause_time = time.time() 

1664 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid): 1664 ↛ 1665 line 1664 didn't jump to line 1665, because the condition on line 1664 was never true

1665 raise util.SMException("failed to pause VDI %s" % vdi_uuid) 

1666 

1667 snapResult = None 

1668 try: 

1669 snapResult = self._snapshot(snapType, cloneOp, cbtlog, consistency_state) 

1670 except Exception as e1: 

1671 try: 

1672 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, 

1673 secondary=None) 

1674 except Exception as e2: 

1675 util.SMlog('WARNING: failed to clean up failed snapshot: ' 

1676 '%s (error ignored)' % e2) 

1677 raise 

1678 self.disable_leaf_on_secondary(vdi_uuid, secondary=secondary) 

1679 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary) 

1680 unpause_time = time.time() 

1681 if (unpause_time - pause_time) > LONG_SNAPTIME: 1681 ↛ 1682 line 1681 didn't jump to line 1682, because the condition on line 1681 was never true

1682 util.SMlog('WARNING: snapshot paused VM for %s seconds' % 

1683 (unpause_time - pause_time)) 

1684 return snapResult 

1685 

1686 def _snapshot(self, snapType, cloneOp=False, cbtlog=None, cbt_consistency=None): 

1687 util.SMlog("LVHDVDI._snapshot for %s (type %s)" % (self.uuid, snapType)) 

1688 

1689 if not self.sr.isMaster: 1689 ↛ 1690 line 1689 didn't jump to line 1690, because the condition on line 1689 was never true

1690 raise xs_errors.XenError('LVMMaster') 

1691 if self.sr.legacyMode: 1691 ↛ 1692 line 1691 didn't jump to line 1692, because the condition on line 1691 was never true

1692 raise xs_errors.XenError('Unimplemented', opterr='In legacy mode') 

1693 

1694 self._loadThis() 

1695 if self.hidden: 1695 ↛ 1696 line 1695 didn't jump to line 1696, because the condition on line 1695 was never true

1696 raise xs_errors.XenError('VDISnapshot', opterr='hidden VDI') 

1697 

1698 self.sm_config = self.session.xenapi.VDI.get_sm_config( \ 

1699 self.sr.srcmd.params['vdi_ref']) 

1700 if "type" in self.sm_config and self.sm_config['type'] == 'raw': 1700 ↛ 1701line 1700 didn't jump to line 1701, because the condition on line 1700 was never true

1701 if not util.fistpoint.is_active("testsm_clone_allow_raw"): 

1702 raise xs_errors.XenError('Unimplemented', \ 

1703 opterr='Raw VDI, snapshot or clone not permitted') 

1704 

1705 # we must activate the entire VHD chain because the real parent could 

1706 # theoretically be anywhere in the chain if all VHDs under it are empty 

1707 self._chainSetActive(True, False) 

1708 if not util.pathexists(self.path): 1708 ↛ 1709 line 1708 didn't jump to line 1709, because the condition on line 1708 was never true

1709 raise xs_errors.XenError('VDIUnavailable', \ 

1710 opterr='VDI unavailable: %s' % (self.path)) 

1711 

1712 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1712 ↛ 1720 line 1712 didn't jump to line 1720, because the condition on line 1712 was never false

1713 depth = vhdutil.getDepth(self.path) 

1714 if depth == -1: 1714 ↛ 1715 line 1714 didn't jump to line 1715, because the condition on line 1714 was never true

1715 raise xs_errors.XenError('VDIUnavailable', \ 

1716 opterr='failed to get VHD depth') 

1717 elif depth >= vhdutil.MAX_CHAIN_SIZE: 1717 ↛ 1718 line 1717 didn't jump to line 1718, because the condition on line 1717 was never true

1718 raise xs_errors.XenError('SnapshotChainTooLong') 

1719 

1720 self.issnap = self.session.xenapi.VDI.get_is_a_snapshot( \ 

1721 self.sr.srcmd.params['vdi_ref']) 

1722 

1723 fullpr = lvhdutil.calcSizeVHDLV(self.size) 

1724 thinpr = util.roundup(lvutil.LVM_SIZE_INCREMENT, \ 

1725 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) 

1726 lvSizeOrig = thinpr 

1727 lvSizeClon = thinpr 

1728 

1729 hostRefs = [] 

1730 if self.sr.cmd == "vdi_snapshot": 

1731 hostRefs = util.get_hosts_attached_on(self.session, [self.uuid]) 

1732 if hostRefs: 1732 ↛ 1734 line 1732 didn't jump to line 1734, because the condition on line 1732 was never false

1733 lvSizeOrig = fullpr 

1734 if self.sr.provision == "thick": 1734 ↛ 1740 line 1734 didn't jump to line 1740, because the condition on line 1734 was never false

1735 if not self.issnap: 1735 ↛ 1736line 1735 didn't jump to line 1736, because the condition on line 1735 was never true

1736 lvSizeOrig = fullpr 

1737 if self.sr.cmd != "vdi_snapshot": 

1738 lvSizeClon = fullpr 

1739 

1740 if (snapType == VDI.SNAPSHOT_SINGLE or 1740 ↛ 1742 line 1740 didn't jump to line 1742, because the condition on line 1740 was never true

1741 snapType == VDI.SNAPSHOT_INTERNAL): 

1742 lvSizeClon = 0 

1743 

1744 # the space required must include 2 journal LVs: a clone journal and an 

1745 # inflate journal (for the failure handling) 

1746 size_req = lvSizeOrig + lvSizeClon + 2 * self.sr.journaler.LV_SIZE 

1747 lvSizeBase = self.size 

1748 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1748 ↛ 1752 line 1748 didn't jump to line 1752, because the condition on line 1748 was never false

1749 lvSizeBase = util.roundup(lvutil.LVM_SIZE_INCREMENT, 

1750 vhdutil.getSizePhys(self.path)) 

1751 size_req -= (self.utilisation - lvSizeBase) 

1752 self.sr._ensureSpaceAvailable(size_req) 
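# Rough worked example of size_req (figures illustrative only): for an
# attached, thick-provisioned leaf, lvSizeOrig is the full calcSizeVHDLV()
# allocation for the new child, lvSizeClon is the empty-VHD overhead (this is
# a vdi_snapshot), plus two journal LVs for the failure handling; from that we
# deduct the space the base copy will give back when it is deflated from its
# current utilisation down to lvSizeBase.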

1753 

1754 if hostRefs: 

1755 self.sr._updateSlavesPreClone(hostRefs, self.lvname) 

1756 

1757 baseUuid = util.gen_uuid() 

1758 origUuid = self.uuid 

1759 clonUuid = "" 

1760 if snapType == VDI.SNAPSHOT_DOUBLE: 1760 ↛ 1762 line 1760 didn't jump to line 1762, because the condition on line 1760 was never false

1761 clonUuid = util.gen_uuid() 

1762 jval = "%s_%s" % (baseUuid, clonUuid) 

1763 self.sr.journaler.create(self.JRN_CLONE, origUuid, jval) 

1764 util.fistpoint.activate("LVHDRT_clone_vdi_after_create_journal", self.sr.uuid) 

1765 

1766 try: 

1767 # self becomes the "base vdi" 

1768 origOldLV = self.lvname 

1769 baseLV = lvhdutil.LV_PREFIX[self.vdi_type] + baseUuid 

1770 self.sr.lvmCache.rename(self.lvname, baseLV) 

1771 self.sr.lvActivator.replace(self.uuid, baseUuid, baseLV, False) 

1772 RefCounter.set(baseUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1773 self.uuid = baseUuid 

1774 self.lvname = baseLV 

1775 self.path = os.path.join(self.sr.path, baseLV) 

1776 self.label = "base copy" 

1777 self.read_only = True 

1778 self.location = self.uuid 

1779 self.managed = False 

1780 

1781 # shrink the base copy to the minimum - we do it before creating 

1782 # the snapshot volumes to avoid requiring double the space 

1783 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1783 ↛ 1786 line 1783 didn't jump to line 1786, because the condition on line 1783 was never false

1784 lvhdutil.deflate(self.sr.lvmCache, self.lvname, lvSizeBase) 

1785 self.utilisation = lvSizeBase 

1786 util.fistpoint.activate("LVHDRT_clone_vdi_after_shrink_parent", self.sr.uuid) 

1787 

1788 snapVDI = self._createSnap(origUuid, lvSizeOrig, False) 

1789 util.fistpoint.activate("LVHDRT_clone_vdi_after_first_snap", self.sr.uuid) 

1790 snapVDI2 = None 

1791 if snapType == VDI.SNAPSHOT_DOUBLE: 1791 ↛ 1797 line 1791 didn't jump to line 1797, because the condition on line 1791 was never false

1792 snapVDI2 = self._createSnap(clonUuid, lvSizeClon, True) 

1793 # If we have CBT enabled on the VDI, 

1794 # set CBT status for the new snapshot disk 

1795 if cbtlog: 

1796 snapVDI2.cbt_enabled = True 

1797 util.fistpoint.activate("LVHDRT_clone_vdi_after_second_snap", self.sr.uuid) 

1798 

1799 # note: it is important to mark the parent hidden only AFTER the 

1800 # new VHD children have been created, which are referencing it; 

1801 # otherwise we would introduce a race with GC that could reclaim 

1802 # the parent before we snapshot it 

1803 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 1803 ↛ 1804 line 1803 didn't jump to line 1804, because the condition on line 1803 was never true

1804 self.sr.lvmCache.setHidden(self.lvname) 

1805 else: 

1806 vhdutil.setHidden(self.path) 

1807 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_hidden", self.sr.uuid) 

1808 

1809 # set the base copy to ReadOnly 

1810 self.sr.lvmCache.setReadonly(self.lvname, True) 

1811 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_ro", self.sr.uuid) 

1812 

1813 if hostRefs: 

1814 self.sr._updateSlavesOnClone(hostRefs, origOldLV, 

1815 snapVDI.lvname, self.uuid, self.lvname) 

1816 

1817 # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE) 

1818 if snapType == VDI.SNAPSHOT_DOUBLE and cbtlog: 

1819 snapVDI._cbt_snapshot(clonUuid, cbt_consistency) 

1820 if hostRefs: 1820 ↛ 1834 line 1820 didn't jump to line 1834, because the condition on line 1820 was never false

1821 cbtlog_file = self._get_cbt_logname(snapVDI.uuid) 

1822 try: 

1823 self.sr._updateSlavesOnCBTClone(hostRefs, cbtlog_file) 

1824 except: 

1825 alert_name = "VDI_CBT_SNAPSHOT_FAILED" 

1826 alert_str = ("Creating CBT snapshot for {} failed" 

1827 .format(snapVDI.uuid)) 

1828 snapVDI._disable_cbt_on_error(alert_name, alert_str) 

1829 pass 

1830 

1831 except (util.SMException, XenAPI.Failure) as e: 

1832 util.logException("LVHDVDI._snapshot") 

1833 self._failClone(origUuid, jval, str(e)) 

1834 util.fistpoint.activate("LVHDRT_clone_vdi_before_remove_journal", self.sr.uuid) 

1835 

1836 self.sr.journaler.remove(self.JRN_CLONE, origUuid) 

1837 

1838 return self._finishSnapshot(snapVDI, snapVDI2, hostRefs, cloneOp, snapType) 
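# Recap of the flow above: the original LV is renamed into a hidden, read-only
# "base copy", deflated to its minimal physical size, and one (snapshot) or
# two (clone) new VHD children are created that reference it; the JRN_CLONE
# journal written before any of this allows _handleInterruptedCloneOp() (via
# _failClone here, or a later SR scan) to recover if the operation is
# interrupted part-way through.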

1839 

1840 def _createSnap(self, snapUuid, snapSizeLV, isNew): 

1841 """Snapshot self and return the snapshot VDI object""" 

1842 snapLV = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + snapUuid 

1843 snapPath = os.path.join(self.sr.path, snapLV) 

1844 self.sr.lvmCache.create(snapLV, int(snapSizeLV)) 

1845 util.fistpoint.activate("LVHDRT_clone_vdi_after_lvcreate", self.sr.uuid) 

1846 if isNew: 

1847 RefCounter.set(snapUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1848 self.sr.lvActivator.add(snapUuid, snapLV, False) 

1849 parentRaw = (self.vdi_type == vhdutil.VDI_TYPE_RAW) 

1850 vhdutil.snapshot(snapPath, self.path, parentRaw, lvhdutil.MSIZE_MB) 

1851 snapParent = vhdutil.getParent(snapPath, lvhdutil.extractUuid) 

1852 

1853 snapVDI = LVHDVDI(self.sr, snapUuid) 

1854 snapVDI.read_only = False 

1855 snapVDI.location = snapUuid 

1856 snapVDI.size = self.size 

1857 snapVDI.utilisation = snapSizeLV 

1858 snapVDI.sm_config = dict() 

1859 for key, val in self.sm_config.items(): 1859 ↛ 1860 line 1859 didn't jump to line 1860, because the loop on line 1859 never started

1860 if key not in [ 

1861 "type", "vdi_type", "vhd-parent", "paused", "relinking", "activating"] and \ 

1862 not key.startswith("host_"): 

1863 snapVDI.sm_config[key] = val 

1864 snapVDI.sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD 

1865 snapVDI.sm_config["vhd-parent"] = snapParent 

1866 snapVDI.lvname = snapLV 

1867 return snapVDI 

1868 

1869 def _finishSnapshot(self, snapVDI, snapVDI2, hostRefs, cloneOp=False, snapType=None): 

1870 if snapType is not VDI.SNAPSHOT_INTERNAL: 1870 ↛ 1872 line 1870 didn't jump to line 1872, because the condition on line 1870 was never false

1871 self.sr._updateStats(self.sr.uuid, self.size) 

1872 basePresent = True 

1873 

1874 # Verify parent locator field of both children and delete basePath if 

1875 # unused 

1876 snapParent = snapVDI.sm_config["vhd-parent"] 

1877 snap2Parent = "" 

1878 if snapVDI2: 1878 ↛ 1880 line 1878 didn't jump to line 1880, because the condition on line 1878 was never false

1879 snap2Parent = snapVDI2.sm_config["vhd-parent"] 

1880 if snapParent != self.uuid and \ 1880 ↛ 1907 line 1880 didn't jump to line 1907, because the condition on line 1880 was never false

1881 (not snapVDI2 or snap2Parent != self.uuid): 

1882 util.SMlog("%s != %s != %s => deleting unused base %s" % \ 

1883 (snapParent, self.uuid, snap2Parent, self.lvname)) 

1884 RefCounter.put(self.uuid, False, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

1885 self.sr.lvmCache.remove(self.lvname) 

1886 self.sr.lvActivator.remove(self.uuid, False) 

1887 if hostRefs: 

1888 self.sr._updateSlavesOnRemove(hostRefs, self.uuid, self.lvname) 

1889 basePresent = False 

1890 else: 

1891 # assign the _binary_ refcount of the original VDI to the new base 

1892 # VDI (but as the normal refcount, since binary refcounts are only 

1893 # for leaf nodes). The normal refcount of the child is not 

1894 # transferred to the base VDI because normal refcounts are 

1895 # incremented and decremented individually, and not based on the 

1896 # VHD chain (i.e., the child's normal refcount will be decremented 

1897 # independently of its parent situation). Add 1 for this clone op. 

1898 # Note that we do not need to protect the refcount operations 

1899 # below with per-VDI locking like we do in lvutil because at this 

1900 # point we have exclusive access to the VDIs involved. Other SM 

1901 # operations are serialized by the Agent or with the SR lock, and 

1902 # any coalesce activations are serialized with the SR lock. (The 

1903 # coalesce activates the coalesced VDI pair in the beginning, which 

1904 # cannot affect the VDIs here because they cannot possibly be 

1905 # involved in coalescing at this point, and at the relinkSkip step 

1906 # that activates the children, which takes the SR lock.) 

1907 ns = lvhdutil.NS_PREFIX_LVM + self.sr.uuid 

1908 (cnt, bcnt) = RefCounter.check(snapVDI.uuid, ns) 

1909 RefCounter.set(self.uuid, bcnt + 1, 0, ns) 
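# Illustrative numbers (an assumption, not taken from a real run): if the leaf
# was attached on exactly one host, RefCounter.check() reports bcnt == 1 and
# the base copy's normal refcount becomes bcnt + 1 == 2, i.e. one reference
# inherited from the attached child chain plus one for this clone operation.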

1910 

1911 # the "paused" and "host_*" sm-config keys are special and must stay on 

1912 # the leaf without being inherited by anyone else 

1913 for key in [x for x in self.sm_config.keys() if x == "paused" or x.startswith("host_")]: 1913 ↛ 1914 line 1913 didn't jump to line 1914, because the loop on line 1913 never started

1914 snapVDI.sm_config[key] = self.sm_config[key] 

1915 del self.sm_config[key] 

1916 

1917 # Introduce any new VDI records & update the existing one 

1918 type = self.session.xenapi.VDI.get_type( \ 

1919 self.sr.srcmd.params['vdi_ref']) 

1920 if snapVDI2: 1920 ↛ 1962 line 1920 didn't jump to line 1962, because the condition on line 1920 was never false

1921 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1922 vdiRef = snapVDI2._db_introduce() 

1923 if cloneOp: 

1924 vdi_info = {UUID_TAG: snapVDI2.uuid, 

1925 NAME_LABEL_TAG: util.to_plain_string( \ 

1926 self.session.xenapi.VDI.get_name_label( \ 

1927 self.sr.srcmd.params['vdi_ref'])), 

1928 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

1929 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])), 

1930 IS_A_SNAPSHOT_TAG: 0, 

1931 SNAPSHOT_OF_TAG: '', 

1932 SNAPSHOT_TIME_TAG: '', 

1933 TYPE_TAG: type, 

1934 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'], 

1935 READ_ONLY_TAG: 0, 

1936 MANAGED_TAG: int(snapVDI2.managed), 

1937 METADATA_OF_POOL_TAG: '' 

1938 } 

1939 else: 

1940 util.SMlog("snapshot VDI params: %s" % \ 

1941 self.session.xenapi.VDI.get_snapshot_time(vdiRef)) 

1942 vdi_info = {UUID_TAG: snapVDI2.uuid, 

1943 NAME_LABEL_TAG: util.to_plain_string( \ 

1944 self.session.xenapi.VDI.get_name_label( \ 

1945 self.sr.srcmd.params['vdi_ref'])), 

1946 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

1947 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])), 

1948 IS_A_SNAPSHOT_TAG: 1, 

1949 SNAPSHOT_OF_TAG: snapVDI.uuid, 

1950 SNAPSHOT_TIME_TAG: '', 

1951 TYPE_TAG: type, 

1952 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'], 

1953 READ_ONLY_TAG: 0, 

1954 MANAGED_TAG: int(snapVDI2.managed), 

1955 METADATA_OF_POOL_TAG: '' 

1956 } 

1957 

1958 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1959 util.SMlog("vdi_clone: introduced 2nd snap VDI: %s (%s)" % \ 

1960 (vdiRef, snapVDI2.uuid)) 

1961 

1962 if basePresent: 1962 ↛ 1963 line 1962 didn't jump to line 1963, because the condition on line 1962 was never true

1963 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) 

1964 vdiRef = self._db_introduce() 

1965 vdi_info = {UUID_TAG: self.uuid, 

1966 NAME_LABEL_TAG: self.label, 

1967 NAME_DESCRIPTION_TAG: self.description, 

1968 IS_A_SNAPSHOT_TAG: 0, 

1969 SNAPSHOT_OF_TAG: '', 

1970 SNAPSHOT_TIME_TAG: '', 

1971 TYPE_TAG: type, 

1972 VDI_TYPE_TAG: self.sm_config['vdi_type'], 

1973 READ_ONLY_TAG: 1, 

1974 MANAGED_TAG: 0, 

1975 METADATA_OF_POOL_TAG: '' 

1976 } 

1977 

1978 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info) 

1979 util.SMlog("vdi_clone: introduced base VDI: %s (%s)" % \ 

1980 (vdiRef, self.uuid)) 

1981 

1982 # Update the original record 

1983 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

1984 self.session.xenapi.VDI.set_sm_config(vdi_ref, snapVDI.sm_config) 

1985 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, \ 

1986 str(snapVDI.utilisation)) 

1987 

1988 # Return the info on the new snap VDI 

1989 snap = snapVDI2 

1990 if not snap: 1990 ↛ 1991 line 1990 didn't jump to line 1991, because the condition on line 1990 was never true

1991 snap = self 

1992 if not basePresent: 

1993 # a single-snapshot of an empty VDI will be a noop, resulting 

1994 # in no new VDIs, so return the existing one. The GC wouldn't 

1995 # normally try to single-snapshot an empty VHD of course, but 

1996 # if an external snapshot operation manages to sneak in right 

1997 # before a snapshot-coalesce phase, we would get here 

1998 snap = snapVDI 

1999 return snap.get_params() 

2000 

2001 def _initFromVDIInfo(self, vdiInfo): 

2002 self.vdi_type = vdiInfo.vdiType 

2003 self.lvname = vdiInfo.lvName 

2004 self.size = vdiInfo.sizeVirt 

2005 self.utilisation = vdiInfo.sizeLV 

2006 self.hidden = vdiInfo.hidden 

2007 if self.hidden: 2007 ↛ 2008 line 2007 didn't jump to line 2008, because the condition on line 2007 was never true

2008 self.managed = False 

2009 self.active = vdiInfo.lvActive 

2010 self.readonly = vdiInfo.lvReadonly 

2011 self.parent = vdiInfo.parentUuid 

2012 self.path = os.path.join(self.sr.path, self.lvname) 

2013 if hasattr(self, "sm_config_override"): 2013 ↛ 2016 line 2013 didn't jump to line 2016, because the condition on line 2013 was never false

2014 self.sm_config_override["vdi_type"] = self.vdi_type 

2015 else: 

2016 self.sm_config_override = {'vdi_type': self.vdi_type} 

2017 self.loaded = True 

2018 

2019 def _initFromLVInfo(self, lvInfo): 

2020 self.vdi_type = lvInfo.vdiType 

2021 self.lvname = lvInfo.name 

2022 self.size = lvInfo.size 

2023 self.utilisation = lvInfo.size 

2024 self.hidden = lvInfo.hidden 

2025 self.active = lvInfo.active 

2026 self.readonly = lvInfo.readonly 

2027 self.parent = '' 

2028 self.path = os.path.join(self.sr.path, self.lvname) 

2029 if hasattr(self, "sm_config_override"): 2029 ↛ 2032 line 2029 didn't jump to line 2032, because the condition on line 2029 was never false

2030 self.sm_config_override["vdi_type"] = self.vdi_type 

2031 else: 

2032 self.sm_config_override = {'vdi_type': self.vdi_type} 

2033 if 'vhd-parent' in self.sm_config_override: 2033 ↛ 2034 line 2033 didn't jump to line 2034, because the condition on line 2033 was never true

2034 self.parent = self.sm_config_override['vhd-parent'] 

2035 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 2035 ↛ 2036 line 2035 didn't jump to line 2036, because the condition on line 2035 was never true

2036 self.loaded = True 

2037 

2038 def _initFromVHDInfo(self, vhdInfo): 

2039 self.size = vhdInfo.sizeVirt 

2040 if (self.parent == '' or (vhdInfo.parentUuid != '' 2040 ↛ 2043 line 2040 didn't jump to line 2043, because the condition on line 2040 was never false

2041 and vhdInfo.parentUuid != self.parent)): 

2042 self.parent = vhdInfo.parentUuid 

2043 self.hidden = vhdInfo.hidden 

2044 self.loaded = True 

2045 

2046 def _determineType(self): 

2047 """Determine whether this is a raw or a VHD VDI""" 

2048 if "vdi_ref" in self.sr.srcmd.params: 

2049 vdi_ref = self.sr.srcmd.params["vdi_ref"] 

2050 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

2051 if sm_config.get("vdi_type"): 2051 ↛ 2052 line 2051 didn't jump to line 2052, because the condition on line 2051 was never true

2052 self.vdi_type = sm_config["vdi_type"] 

2053 prefix = lvhdutil.LV_PREFIX[self.vdi_type] 

2054 self.lvname = "%s%s" % (prefix, self.uuid) 

2055 self.path = os.path.join(self.sr.path, self.lvname) 

2056 self.sm_config_override = sm_config 

2057 return True 

2058 

2059 # LVM commands can be costly, so check the file directly first in case 

2060 # the LV is active 

2061 found = False 

2062 for t in lvhdutil.VDI_TYPES: 2062 ↛ 2063 line 2062 didn't jump to line 2063, because the loop on line 2062 never started

2063 lvname = "%s%s" % (lvhdutil.LV_PREFIX[t], self.uuid) 

2064 path = os.path.join(self.sr.path, lvname) 

2065 if util.pathexists(path): 

2066 if found: 

2067 raise xs_errors.XenError('VDILoad', 

2068 opterr="multiple VDI's: uuid %s" % self.uuid) 

2069 found = True 

2070 self.vdi_type = t 

2071 self.lvname = lvname 

2072 self.path = path 

2073 if found: 2073 ↛ 2074 line 2073 didn't jump to line 2074, because the condition on line 2073 was never true

2074 return True 

2075 

2076 # now list all LV's 

2077 if not lvutil._checkVG(self.sr.vgname): 2077 ↛ 2079 line 2077 didn't jump to line 2079, because the condition on line 2077 was never true

2078 # when doing attach_from_config, the VG won't be there yet 

2079 return False 

2080 

2081 lvs = lvhdutil.getLVInfo(self.sr.lvmCache) 

2082 if lvs.get(self.uuid): 

2083 self._initFromLVInfo(lvs[self.uuid]) 

2084 return True 

2085 return False 

2086 

2087 def _loadThis(self): 

2088 """Load VDI info for this VDI and activate the LV if it's VHD. We 

2089 don't do it in VDI.load() because not all VDI operations need it.""" 

2090 if self.loaded: 2090 ↛ 2091 line 2090 didn't jump to line 2091, because the condition on line 2090 was never true

2091 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

2092 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

2093 return 

2094 try: 

2095 lvs = lvhdutil.getLVInfo(self.sr.lvmCache, self.lvname) 

2096 except util.CommandException as e: 

2097 raise xs_errors.XenError('VDIUnavailable', 

2098 opterr='%s (LV scan error)' % os.strerror(abs(e.code))) 

2099 if not lvs.get(self.uuid): 2099 ↛ 2100 line 2099 didn't jump to line 2100, because the condition on line 2099 was never true

2100 raise xs_errors.XenError('VDIUnavailable', opterr='LV not found') 

2101 self._initFromLVInfo(lvs[self.uuid]) 

2102 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 2102 ↛ 2109 line 2102 didn't jump to line 2109, because the condition on line 2102 was never false

2103 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

2104 vhdInfo = vhdutil.getVHDInfo(self.path, lvhdutil.extractUuid, False) 

2105 if not vhdInfo: 2105 ↛ 2106 line 2105 didn't jump to line 2106, because the condition on line 2105 was never true

2106 raise xs_errors.XenError('VDIUnavailable', \ 

2107 opterr='getVHDInfo failed') 

2108 self._initFromVHDInfo(vhdInfo) 

2109 self.loaded = True 

2110 

2111 def _chainSetActive(self, active, binary, persistent=False): 

2112 if binary: 2112 ↛ 2113 line 2112 didn't jump to line 2113, because the condition on line 2112 was never true

2113 (count, bcount) = RefCounter.checkLocked(self.uuid, 

2114 lvhdutil.NS_PREFIX_LVM + self.sr.uuid) 

2115 if (active and bcount > 0) or (not active and bcount == 0): 

2116 return # this is a redundant activation/deactivation call 

2117 

2118 vdiList = {self.uuid: self.lvname} 

2119 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 2119 ↛ 2122 line 2119 didn't jump to line 2122, because the condition on line 2119 was never false

2120 vdiList = vhdutil.getParentChain(self.lvname, 

2121 lvhdutil.extractUuid, self.sr.vgname) 

2122 for uuid, lvName in vdiList.items(): 2122 ↛ 2123 line 2122 didn't jump to line 2123, because the loop on line 2122 never started

2123 binaryParam = binary 

2124 if uuid != self.uuid: 

2125 binaryParam = False # binary param only applies to leaf nodes 

2126 if active: 

2127 self.sr.lvActivator.activate(uuid, lvName, binaryParam, 

2128 persistent) 

2129 else: 

2130 # just add the LVs for deactivation in the final (cleanup) 

2131 # step. The LVs must not have been activated during the current 

2132 # operation 

2133 self.sr.lvActivator.add(uuid, lvName, binaryParam) 

2134 

2135 def _failClone(self, uuid, jval, msg): 

2136 try: 

2137 self.sr._handleInterruptedCloneOp(uuid, jval, True) 

2138 self.sr.journaler.remove(self.JRN_CLONE, uuid) 

2139 except Exception as e: 

2140 util.SMlog('WARNING: failed to clean up failed snapshot: ' \ 

2141 ' %s (error ignored)' % e) 

2142 raise xs_errors.XenError('VDIClone', opterr=msg) 

2143 

2144 def _markHidden(self): 

2145 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

2146 self.sr.lvmCache.setHidden(self.lvname) 

2147 else: 

2148 vhdutil.setHidden(self.path) 

2149 self.hidden = 1 

2150 

2151 def _prepareThin(self, attach): 

2152 origUtilisation = self.sr.lvmCache.getSize(self.lvname) 

2153 if self.sr.isMaster: 

2154 # the master can prepare the VDI locally 

2155 if attach: 

2156 lvhdutil.attachThin(self.sr.journaler, self.sr.uuid, self.uuid) 

2157 else: 

2158 lvhdutil.detachThin(self.session, self.sr.lvmCache, 

2159 self.sr.uuid, self.uuid) 

2160 else: 

2161 fn = "attach" 

2162 if not attach: 

2163 fn = "detach" 

2164 pools = self.session.xenapi.pool.get_all() 

2165 master = self.session.xenapi.pool.get_master(pools[0]) 

2166 rv = self.session.xenapi.host.call_plugin( 

2167 master, self.sr.THIN_PLUGIN, fn, 

2168 {"srUuid": self.sr.uuid, "vdiUuid": self.uuid}) 

2169 util.SMlog("call-plugin returned: %s" % rv) 

2170 if not rv: 

2171 raise Exception('plugin %s failed' % self.sr.THIN_PLUGIN) 

2172 # refresh to pick up the size change on this slave 

2173 self.sr.lvmCache.activateNoRefcount(self.lvname, True) 
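# On a slave the inflate/deflate is delegated to the pool master through the
# self.sr.THIN_PLUGIN host plugin (size changes are driven from the master,
# as with create/resize above); the activateNoRefcount(self.lvname, True)
# call above then refreshes this host's view of the LV so the size change is
# visible locally.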

2174 

2175 self.utilisation = self.sr.lvmCache.getSize(self.lvname) 

2176 if origUtilisation != self.utilisation: 

2177 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

2178 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, 

2179 str(self.utilisation)) 

2180 stats = lvutil._getVGstats(self.sr.vgname) 

2181 sr_utilisation = stats['physical_utilisation'] 

2182 self.session.xenapi.SR.set_physical_utilisation(self.sr.sr_ref, 

2183 str(sr_utilisation)) 

2184 

2185 @override 

2186 def update(self, sr_uuid, vdi_uuid) -> None: 

2187 if self.sr.legacyMode: 

2188 return 

2189 

2190 # Sync the name_label of this VDI on storage with the name_label in XAPI 

2191 vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid) 

2192 update_map = {} 

2193 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \ 

2194 METADATA_OBJECT_TYPE_VDI 

2195 update_map[UUID_TAG] = self.uuid 

2196 update_map[NAME_LABEL_TAG] = util.to_plain_string( \ 

2197 self.session.xenapi.VDI.get_name_label(vdi_ref)) 

2198 update_map[NAME_DESCRIPTION_TAG] = util.to_plain_string( \ 

2199 self.session.xenapi.VDI.get_name_description(vdi_ref)) 

2200 update_map[SNAPSHOT_TIME_TAG] = \ 

2201 self.session.xenapi.VDI.get_snapshot_time(vdi_ref) 

2202 update_map[METADATA_OF_POOL_TAG] = \ 

2203 self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref) 

2204 LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map) 

2205 

2206 @override 

2207 def _ensure_cbt_space(self) -> None: 

2208 self.sr.ensureCBTSpace() 

2209 

2210 @override 

2211 def _create_cbt_log(self) -> str: 

2212 logname = self._get_cbt_logname(self.uuid) 

2213 self.sr.lvmCache.create(logname, self.sr.journaler.LV_SIZE, CBTLOG_TAG) 

2214 logpath = super(LVHDVDI, self)._create_cbt_log() 

2215 self.sr.lvmCache.deactivateNoRefcount(logname) 

2216 return logpath 

2217 

2218 @override 

2219 def _delete_cbt_log(self) -> None: 

2220 logpath = self._get_cbt_logpath(self.uuid) 

2221 if self._cbt_log_exists(logpath): 

2222 logname = self._get_cbt_logname(self.uuid) 

2223 self.sr.lvmCache.remove(logname) 

2224 

2225 @override 

2226 def _rename(self, oldpath, newpath) -> None: 

2227 oldname = os.path.basename(oldpath) 

2228 newname = os.path.basename(newpath) 

2229 self.sr.lvmCache.rename(oldname, newname) 

2230 

2231 @override 

2232 def update_slaves_on_cbt_disable(self, cbtlog) -> None: 

2233 args = { 

2234 "vgName": self.sr.vgname, 

2235 "action1": "deactivateNoRefcount", 

2236 "lvName1": cbtlog 

2237 } 

2238 

2239 host_refs = util.get_hosts_attached_on(self.session, [self.uuid]) 

2240 

2241 message = f"Deactivating {cbtlog}" 

2242 self.sr.call_on_slave(args, host_refs, message) 

2243 

2244 @override 

2245 def _activate_cbt_log(self, lv_name) -> bool: 

2246 self.sr.lvmCache.refresh() 

2247 if not self.sr.lvmCache.is_active(lv_name): 2247 ↛ 2248 line 2247 didn't jump to line 2248, because the condition on line 2247 was never true

2248 try: 

2249 self.sr.lvmCache.activateNoRefcount(lv_name) 

2250 return True 

2251 except Exception as e: 

2252 util.SMlog("Exception in _activate_cbt_log, " 

2253 "Error: %s." % str(e)) 

2254 raise 

2255 else: 

2256 return False 

2257 

2258 @override 

2259 def _deactivate_cbt_log(self, lv_name) -> None: 

2260 try: 

2261 self.sr.lvmCache.deactivateNoRefcount(lv_name) 

2262 except Exception as e: 

2263 util.SMlog("Exception in _deactivate_cbt_log, Error: %s." % str(e)) 

2264 raise 

2265 

2266 @override 

2267 def _cbt_log_exists(self, logpath) -> bool: 

2268 return lvutil.exists(logpath) 

2269 

2270if __name__ == '__main__': 2270 ↛ 2271 line 2270 didn't jump to line 2271, because the condition on line 2270 was never true

2271 SRCommand.run(LVHDSR, DRIVER_INFO) 

2272else: 

2273 SR.registerSR(LVHDSR)
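# Module entry points, in brief: when the storage manager invokes this file as
# a script, SRCommand.run() dispatches the requested SMAPI command to the
# LVHDSR/LVHDVDI methods above; when the module is merely imported, only
# SR.registerSR(LVHDSR) runs, which registers the driver type.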