Kailash Khalasi | deeead0 | 2018-04-11 13:27:27 -0700 | [diff] [blame^] | 1 | // Copyright 2017-present Open Networking Foundation |
| 2 | // |
| 3 | // Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | // you may not use this file except in compliance with the License. |
| 5 | // You may obtain a copy of the License at |
| 6 | // |
| 7 | // http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | // |
| 9 | // Unless required by applicable law or agreed to in writing, software |
| 10 | // distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | // See the License for the specific language governing permissions and |
| 13 | // limitations under the License. |
| 14 | |
| 15 | import groovy.json.JsonSlurperClassic |
| 16 | |
// Manifest file name for the branch under test.
// BUG FIX: this must be a double-quoted GString so Groovy interpolates
// ${GERRIT_BRANCH}; the previous single-quoted form kept the literal
// text "manifest-${GERRIT_BRANCH}.xml" (single-quoted Groovy strings
// never interpolate). Double quotes match the interpolation style used
// elsewhere in this file (e.g. the git clone at the parse stage).
def filename = "manifest-${GERRIT_BRANCH}.xml"
def manifestUrl = 'https://gerrit.opencord.org/manifest'
// Parsed YAML deployment/POD configuration; bound at script scope (no
// 'def') so the helper functions below can read them.
deployment_config = null;
pod_config = null;
| 21 | |
// Main pipeline: re-deploys a physical CORD POD from scratch on the Jenkins
// agent named by ${devNodeName}, then performs post-install fabric and
// compute-node configuration. Whole run is bounded at 240 minutes.
node ("${devNodeName}") {
    timeout (time: 240) {
        stage ("Clean-up previous build") {
            // Best-effort teardown of any previous deployment; "|| true"
            // keeps a failed clean from failing the build.
            sh "make -C build clean-all || true"
            sh "rm -rf *"
        }
        stage ("Parse deployment configuration file") {
            // Single-quoted scripts: ${configRepoBaseDir}/${GERRIT_BRANCH}/
            // ${configRepoUrl} are expanded by the SHELL from the job's
            // environment, not by Groovy.
            sh returnStdout: true, script: 'rm -rf ${configRepoBaseDir}'
            sh returnStdout: true, script: 'git clone -b ${GERRIT_BRANCH} ${configRepoUrl}'
            // deployment_config names the POD-config file to load next.
            deployment_config = readYaml file: "${configRepoBaseDir}${configRepoFile}"
            pod_config = readYaml file: "${configRepoBaseDir}${deployment_config.pod_config.file_name}"
        }
        stage ('Remove old head node from known hosts') {
            // The head node will be reimaged, so its old host key must go.
            sh "ssh-keygen -R ${deployment_config.head.ip}"
        }
        stage ('Checkout cord repo') {
            checkout changelog: false, poll: false, scm: [$class: 'RepoScm', currentBranch: true, manifestBranch: params.branch, manifestRepositoryUrl: "${manifestUrl}", quiet: true]
        }

        try {
            dir('build') {
                stage ("Re-deploy head node") {
                    // NOTE(review): "maasOps:" is a Groovy statement label on
                    // the block, not a pipeline step — the closure runs inline.
                    maasOps: {
                        sh "maas login maas http://${deployment_config.maas.ip}/MAAS/api/2.0 ${deployment_config.maas.api_key}"
                        sh "maas maas machine release ${deployment_config.maas.head_system_id}"

                        // Poll (up to 15 min) until MAAS reports the released
                        // head node back in the "Ready" state; grep's non-zero
                        // exit throws, which waitUntil treats as "not yet".
                        timeout(time: 15) {
                            waitUntil {
                                try {
                                    sh "maas maas machine read ${deployment_config.maas.head_system_id} | grep Ready"
                                    return true
                                } catch (exception) {
                                    return false
                                }
                            }
                        }

                        sh 'maas maas machines allocate'
                        sh "maas maas machine deploy ${deployment_config.maas.head_system_id}"

                        // Poll (up to 30 min) until the head node reaches
                        // "Deployed".
                        timeout(time: 30) {
                            waitUntil {
                                try {
                                    sh "maas maas machine read ${deployment_config.maas.head_system_id} | grep Deployed"
                                    return true
                                } catch (exception) {
                                    return false
                                }
                            }
                        }
                    }
                }

                stage ("Download CORD POD configuration") {
                    sh "cd ../orchestration/profiles; git clone -b ${GERRIT_BRANCH} ${deployment_config.pod_config.repo_url} automation"
                }

                stage ("Generate CORD configuration") {
                    sh "make PODCONFIG_PATH=../orchestration/profiles/automation/${deployment_config.pod_config.file_name} config"
                }

                if (deployment_config.fabric_switches != null) {
                    stage("Reserve IPs for fabric switches") {
                        // Append one dhcpd host reservation per fabric switch
                        // so each switch always gets its configured mgmt IP.
                        for(int i=0; i < deployment_config.fabric_switches.size(); i++) {
                            def str = createMACIPbindingStr("fabric", i+1,
                                "${deployment_config.fabric_switches[i].mac}",
                                "${deployment_config.fabric_switches[i].ip}")
                            sh "echo $str >> maas/roles/maas/files/dhcpd.reservations"
                        }
                    }
                }

                stage ("Deploy") {
                    sh "make build"
                }

                if (deployment_config.compute_nodes != null) {
                    stage ("Power cycle compute nodes") {
                        // Forces each compute node to PXE-boot into the newly
                        // deployed MAAS environment.
                        for(int i=0; i < deployment_config.compute_nodes.size(); i++) {
                            sh "ipmitool -U ${deployment_config.compute_nodes[i].ipmi.user} -P ${deployment_config.compute_nodes[i].ipmi.pass} -H ${deployment_config.compute_nodes[i].ipmi.ip} power cycle"
                        }
                    }

                    stage ("Wait for compute nodes to get deployed") {
                        // The reimaged head node has a new host key.
                        sh "ssh-keygen -f /home/${deployment_config.dev_node.user}/.ssh/known_hosts -R ${deployment_config.head.ip}"
                        // Log in to the POD-local MAAS running on the head node.
                        def cordApiKey = runHeadNodeCmd("sudo maas-region-admin apikey --username ${deployment_config.head.user}")
                        runHeadNodeCmd("maas login pod-maas http://${deployment_config.head.ip}/MAAS/api/1.0 ${cordApiKey}")
                        // Poll (up to 90 min) until every compute node shows
                        // "deployed" in the POD MAAS node list.
                        timeout(time: 90) {
                            waitUntil {
                                try {
                                    num = runHeadNodeCmd("maas pod-maas nodes list | grep substatus_name | grep -i deployed | wc -l").trim()
                                    return num.toInteger() == deployment_config.compute_nodes.size()
                                } catch (exception) {
                                    return false
                                }
                            }
                        }
                    }

                    stage ("Wait for compute nodes to be provisioned") {
                        // Poll (up to 45 min) until 'cord prov list' reports
                        // every compute node's provisioning as complete.
                        timeout(time:45) {
                            waitUntil {
                                try {
                                    num = runHeadNodeCmd("cord prov list | grep -i node | grep -i complete | wc -l").trim()
                                    return num.toInteger() == deployment_config.compute_nodes.size()
                                } catch (exception) {
                                    return false
                                }
                            }
                        }
                    }
                }

                if (deployment_config.fabric_switches != null) {
                    stage ("Wait for fabric switches to get deployed") {
                        // Reinstall ONIE on every switch and reboot into it.
                        runFabricSwitchCmdAll("sudo onl-onie-boot-mode install")
                        runFabricSwitchCmdAll("sudo reboot")
                        // Ensure that switches get provisioned after ONIE reinstall.
                        // Delete them if they were provisioned earlier. If the switches are not
                        // present in 'cord prov list', this command has no effect.
                        for(int i=0; i < deployment_config.fabric_switches.size(); i++) {
                            runHeadNodeCmd("cord prov delete ${deployment_config.fabric_switches[i].mac}")
                        }
                        // Poll (up to 45 min) until every switch has been
                        // harvested (re-discovered) by the head node.
                        timeout(time: 45) {
                            waitUntil {
                                try {
                                    def harvestCompleted = runHeadNodeCmd("cord harvest list | grep -i fabric | wc -l").trim()
                                    return harvestCompleted.toInteger() == deployment_config.fabric_switches.size()
                                } catch (exception) {
                                    return false
                                }
                            }
                        }
                    }

                    stage ("Wait for fabric switches to be provisioned") {
                        // Poll (up to 45 min), counting per-switch "complete"
                        // provisioning entries by mgmt IP.
                        timeout(time:45) {
                            waitUntil {
                                try {
                                    def provCompleted = 0
                                    for(int i=0; i < deployment_config.fabric_switches.size(); i++) {
                                        def count = runHeadNodeCmd("cord prov list | grep -i ${deployment_config.fabric_switches[i].ip} | grep -i complete | wc -l").trim()
                                        provCompleted = provCompleted + count.toInteger()
                                    }
                                    return provCompleted == deployment_config.fabric_switches.size()
                                } catch (exception) {
                                    return false
                                }
                            }
                        }
                    }

                    // Post installation configuration starts here
                    // e.g. "10.6.1.2/24" -> prefix "10.6"; bound at script
                    // scope so helper functions can use it.
                    fabricIpPrefix = pod_config.fabric_ip.split(/\.\d+\.\d+\/24/)[0]
                    xosUser = "xosadmin@opencord.org"
                    xosPass = runHeadNodeCmd("cat /opt/credentials/xosadmin@opencord.org").trim()
                    stage ("Connect fabric switches and compute nodes to ONOS") {
                        // Configure breakout ports: uncomment the matching
                        // port_mode_<N> lines in the switch's ofdpa.conf.
                        for(int i=0; i < deployment_config.fabric_switches.size(); i++) {
                            if (deployment_config.fabric_switches[i].breakout_ports != null) {
                                for(int j=0; j < deployment_config.fabric_switches[i].breakout_ports.size(); j++) {
                                    runFabricSwitchCmd("${deployment_config.fabric_switches[i].ip}",
                                        "${deployment_config.fabric_switches[i].user}",
                                        "${deployment_config.fabric_switches[i].pass}",
                                        "sed -i -e 's/#port_mode_${deployment_config.fabric_switches[i].breakout_ports[j]}=/port_mode_${deployment_config.fabric_switches[i].breakout_ports[j]}=/g' /etc/accton/ofdpa.conf")
                                }
                            }
                        }
                        connectFabricAndComputeToOnos()
                    }
                    stage ("Configure the compute nodes") {
                        // Count leaf switches; compute fabric IPs are laid out
                        // one /24 per leaf.
                        leafSwitchNum = 0
                        for(int i=0; i < deployment_config.fabric_switches.size(); i++) {
                            if(deployment_config.fabric_switches[i].containsKey("role")) {
                                if(deployment_config.fabric_switches[i].role.toLowerCase().contains("leaf")) {
                                    leafSwitchNum += 1
                                }
                            }
                        }
                        for(int i=1; i<=leafSwitchNum; i++) {
                            // Figure out which compute node connects to which switch
                            leafName = "leaf-" + i.toString()
                            computeNames = getComputeNames(leafName)
                            echo "Compute nodes connnected to " + leafName + " switch:"
                            for(name in computeNames) { echo "${name}" }
                            index = 1
                            for(name in computeNames) {
                                // Nodes on leaf-1 keep their deploy-time fabric
                                // IP; nodes on other leaves are renumbered into
                                // that leaf's subnet via the XOS REST API.
                                if(i>1) {
                                    //Update fabric IP of compute nodes
                                    index += 1
                                    fabricIp = runComputeNodeCmd("${name}", "ip a | grep -o '${fabricIpPrefix}.[1-9][0-9]*.[0-9]*/24'").trim()
                                    if (fabricIp != "") {
                                        // Look up the XOS node id by its current dataPlaneIp.
                                        nodeId = sh(returnStdout: true, script: "curl -u ${xosUser}:${xosPass} -X GET http://${deployment_config.head.ip}/xosapi/v1/core/nodes | jq '.[\"items\"][] | select(.dataPlaneIp==\"${fabricIp}\") | .id'").trim()
                                        newFabricIp = "${fabricIpPrefix}." + i.toString() + "." + index.toString() + "/24"
                                        out = sh(returnStdout: true, script: "curl -u ${xosUser}:${xosPass} -X PUT -d '{\"dataPlaneIp\":\"${newFabricIp}\"}' http://${deployment_config.head.ip}/xosapi/v1/core/nodes/${nodeId}").trim()
                                        // Wait until the new fabric IP gets configured
                                        timeout(time: 5) {
                                            waitUntil {
                                                try {
                                                    num = runComputeNodeCmd("${name}", "ip a | grep " + newFabricIp + " | wc -l").trim()
                                                    return num.toInteger() == 1
                                                } catch (exception) {
                                                    return false
                                                }
                                            }
                                        }
                                    }
                                    else echo "Cannot find fabric IP matching pattern ${fabricIpPrefix}.[1-9][0-9]*.[0-9]*"
                                }
                                //Add routes to fabric subnets
                                for(int j=1; j<=leafSwitchNum; j++) {
                                    if(j!=i) {
                                        runComputeNodeCmd("${name}", "sudo ip route add ${fabricIpPrefix}." + j.toString() + ".0/24 via ${fabricIpPrefix}." + i.toString() + ".254 || echo route already exists")
                                    }
                                }
                            }
                        }
                    }
                    stage ("Generate and load network configuration") {
                        // Reconnect compute nodes to update the fabric IP in ONOS
                        connectFabricAndComputeToOnos()
                        // Refresh fabric configurations
                        sh "make fabric-refresh"
                    }
                }
            }

            if (deployment_config.make_release == true) {
                stage ("Trigger Build") {
                    // Kick the downstream release-build job for this branch;
                    // 201 Created is the only acceptable response.
                    url = 'https://jenkins.opencord.org/job/release-build/job/' + params.branch + '/build'
                    httpRequest authentication: 'auto-release', httpMode: 'POST', url: url, validResponseCodes: '201'
                }
            }

            currentBuild.result = 'SUCCESS'
        } catch (err) {
            // Any stage failure lands here: mark the build failed and notify.
            currentBuild.result = 'FAILURE'
            step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "${notificationEmail}", sendToIndividuals: false])
        }
        echo "RESULT: ${currentBuild.result}"
    }
}
| 264 | |
| 265 | /** |
| 266 | * Returns a string used to bind IPs and MAC addresses, substituting the values |
| 267 | * given. |
| 268 | * |
| 269 | * @param word the word used to generate the host name |
| 270 | * @param counter the counter used to generate the host name |
| 271 | * @param mac the MAC address to substitute |
| 272 | * @param ip the IP address to substitute |
| 273 | */ |
def createMACIPbindingStr(word, counter, mac, ip) {
    // Builds a dhcpd.reservations "host" entry for host "<word>-<counter>".
    // The result is passed through `sh "echo $str >> …"`, so the odd
    // '\n'/';' quoting is shaped for SHELL evaluation, not Groovy: the
    // single-quoted fragments are meant to survive echo and produce the
    // newline/semicolon layout dhcpd expects.
    // NOTE(review): verify the echoed entry is valid dhcpd syntax on a real
    // run before touching this quoting — it is deliberately fragile.
    return """host ${word}-${counter} {'\n'hardware ethernet ${mac}';''\n'fixed-address ${ip}';''\n'}"""
}
| 277 | |
| 278 | /** |
| 279 | * Runs a command on a remote host using sshpass. |
| 280 | * |
| 281 | * @param ip the node IP address |
| 282 | * @param user the node user name |
| 283 | * @param pass the node password |
| 284 | * @param command the command to run |
| 285 | * @param sshArgs arguments for the ssh command |
| 286 | * @return the output of the command |
| 287 | */ |
/**
 * Runs a command on a remote host, using sshpass to supply the password
 * non-interactively and skipping host-key verification.
 *
 * @param ip the node IP address
 * @param user the node user name
 * @param pass the node password
 * @param command the command to run
 * @param sshArgs extra arguments for the ssh command (optional)
 * @return the command's stdout
 */
def runCmd(ip, user, pass, command, sshArgs="") {
    def script = "sshpass -p ${pass} ssh ${sshArgs} -oStrictHostKeyChecking=no -l ${user} ${ip} \"${command}\""
    return sh(returnStdout: true, script: script)
}
| 291 | |
| 292 | /** |
| 293 | * Runs a command on the head node. |
| 294 | * |
| 295 | * @param command the command to run |
| 296 | * @param sshArgs arguments for the ssh command |
| 297 | * @return the output of the command |
| 298 | */ |
/**
 * Runs a command on the POD head node, whose address and credentials come
 * from the parsed deployment configuration.
 *
 * @param command the command to run
 * @param sshArgs extra arguments for the ssh command (optional)
 * @return the command's stdout
 */
def runHeadNodeCmd(command, sshArgs="") {
    def head = deployment_config.head
    return sh(returnStdout: true, script: "sshpass -p ${head.pass} ssh ${sshArgs} -oStrictHostKeyChecking=no -l ${head.user} ${head.ip} \"${command}\"")
}
| 302 | |
| 303 | /** |
| 304 | * Runs a command on a fabric switch. |
| 305 | * |
| 306 | * @param ip the mgmt IP of the fabric switch, reachable from the head node |
| 307 | * @param user the mgmt user name of the fabric switch |
| 308 | * @param pass the mgmt password of the fabric switch |
| 309 | * @param command the command to run on the fabric switch |
| 310 | * @param ssgArgs arguments for the ssh command |
| 311 | * @return the output of the command |
| 312 | */ |
/**
 * Runs a command on a fabric switch by hopping through the head node
 * (the switch mgmt network is only reachable from there).
 *
 * @param ip the mgmt IP of the fabric switch, reachable from the head node
 * @param user the mgmt user name of the fabric switch
 * @param pass the mgmt password of the fabric switch
 * @param command the command to run on the fabric switch
 * @param sshArgs extra arguments applied to BOTH ssh hops (optional)
 * @return the command's stdout
 */
def runFabricSwitchCmd(ip, user, pass, command, sshArgs="") {
    def head = deployment_config.head
    // Inner ssh: head node -> switch; it is quoted so the whole thing runs
    // as a single remote command on the head node.
    def switchCmd = "sshpass -p ${pass} ssh ${sshArgs} -oStrictHostKeyChecking=no -l ${user} ${ip} ${command}"
    return sh(returnStdout: true, script: "sshpass -p ${head.pass} ssh ${sshArgs} -oStrictHostKeyChecking=no -l ${head.user} ${head.ip} \"${switchCmd}\"")
}
| 316 | |
| 317 | /** |
| 318 | * Runs a command on all fabric switches |
| 319 | * |
| 320 | * @param command the command to run on the fabric switches |
| 321 | * @param ssgArgs arguments for the ssh command |
| 322 | */ |
/**
 * Runs the same command on every fabric switch in the deployment
 * configuration, in list order.
 *
 * @param command the command to run on the fabric switches
 * @param sshArgs extra arguments for the ssh command (optional)
 */
def runFabricSwitchCmdAll(command, sshArgs="") {
    deployment_config.fabric_switches.each { fabricSwitch ->
        runFabricSwitchCmd("${fabricSwitch.ip}",
                           "${fabricSwitch.user}",
                           "${fabricSwitch.pass}",
                           "${command}",
                           "${sshArgs}")
    }
}
| 332 | |
| 333 | /** |
| 334 | * Runs a command on a compute node. |
| 335 | * |
| 336 | * @param name the name of the compute node |
| 337 | * @param command the command to run on the compute node |
| 338 | * @param ssgArgs arguments for the ssh command |
| 339 | * @return the output of the command |
| 340 | */ |
/**
 * Runs a command on a compute node by hopping through the head node;
 * compute nodes are reached by name as the "ubuntu" user (the head node's
 * key grants access, so no password is needed on the inner hop).
 *
 * @param name the name of the compute node
 * @param command the command to run on the compute node
 * @param sshArgs extra arguments applied to BOTH ssh hops (optional)
 * @return the command's stdout
 */
def runComputeNodeCmd(name, command, sshArgs="") {
    def head = deployment_config.head
    def hopCmd = "ssh ${sshArgs} ubuntu@${name} ${command}"
    return sh(returnStdout: true, script: "sshpass -p ${head.pass} ssh ${sshArgs} -oStrictHostKeyChecking=no -l ${head.user} ${head.ip} \"${hopCmd}\"")
}
| 344 | |
| 345 | /** |
| 346 | * Runs a command on all compute nodes |
| 347 | * |
| 348 | * @param command the command to run on the compute nodes |
| 349 | * @param ssgArgs arguments for the ssh command |
| 350 | */ |
/**
 * Runs the same command on every compute node known to the POD.
 * Note: assigns the script-scoped binding computeNamesAll as a side
 * effect, matching the original behavior.
 *
 * @param command the command to run on the compute nodes
 * @param sshArgs extra arguments for the ssh command (optional)
 */
def runComputeNodeCmdAll(command, sshArgs="") {
    computeNamesAll = getComputeNames()
    computeNamesAll.each { nodeName ->
        runComputeNodeCmd("${nodeName}", "${command}", "${sshArgs}")
    }
}
| 357 | |
| 358 | /** |
| 359 | * Runs an ONOS CLI command |
| 360 | * |
| 361 | * @param name the onos node name, reachable from the head node |
| 362 | * @param port the port used to login to ONOS CLI |
| 363 | * @param user the user name to login to ONOS CLI |
| 364 | * @param pass the password to login to ONOS CLI |
| 365 | * @param command the command to run in ONOS CLI |
| 366 | * @return the output of the command |
| 367 | */ |
/**
 * Runs an ONOS CLI command by sshing from the head node into the ONOS
 * karaf shell (custom port, password auth via sshpass).
 *
 * @param name the onos node name, reachable from the head node
 * @param port the port used to login to ONOS CLI
 * @param user the user name to login to ONOS CLI
 * @param pass the password to login to ONOS CLI
 * @param command the command to run in ONOS CLI
 * @return the command's stdout
 */
def runOnosCliCmd(name, port, user, pass, command) {
    def head = deployment_config.head
    def cliCmd = "sshpass -p ${pass} ssh -oStrictHostKeyChecking=no -l ${user} -p ${port} ${name} ${command}"
    return sh(returnStdout: true, script: "sshpass -p ${head.pass} ssh -oStrictHostKeyChecking=no -l ${head.user} ${head.ip} \"${cliCmd}\"")
}
| 371 | |
| 372 | /** |
| 373 | * Returns a list of compute node names. When "role" is specified, returns only |
| 374 | * names of compute nodes connected to the switch |
| 375 | * |
| 376 | * @param role the switch role, i.e. "leaf-1" |
| 377 | */ |
def getComputeNames(role="") {
    // 'cord prov list' on the head node: take column 2 (node name) of each
    // node row; the sed wraps every name in single quotes so the output can
    // be massaged into JSON below. The \\\$ and \\\" escapes survive both
    // Groovy and the ssh shell layer.
    computeNamesAll = runHeadNodeCmd("cord prov list | grep node | awk '{print \\\$2}' | sed -e \\\"s/.*/'&'/\\\"").trim()
    // split() gives a String[]; interpolating that array stringifies it as
    // "[a, b, …]", and swapping ' for " turns it into parseable JSON.
    // Deliberately hacky but self-consistent — do not "simplify".
    computeNamesAll = "${computeNamesAll}".split()
    computeNamesAll = "${computeNamesAll}".replaceAll("'", "\"")
    computeNamesAll = new JsonSlurperClassic().parseText("${computeNamesAll}")
    // No role filter requested: return every compute node name.
    if ("${role}" == "") return computeNamesAll
    computeNames = []
    switchMac = ""
    // Find the MAC of the switch whose "role" matches (e.g. "leaf-1").
    for(int i=0; i < deployment_config.fabric_switches.size(); i++) {
        if(deployment_config.fabric_switches[i].containsKey("role")) {
            if ("${deployment_config.fabric_switches[i].role}" == "${role}")
                switchMac = "${deployment_config.fabric_switches[i].mac}"
        }
    }
    if ("${switchMac}" != "") {
        // ONOS element IDs embed the MAC lowercase without colons.
        switchMac = switchMac.toLowerCase().replaceAll(':','')
        // Get fabric IPs of compute nodes connected to the switch
        // (ONOS "hosts -j" filtered by switch element id via jq).
        // NOTE(review): relies on fabricIpPrefix being bound at script scope
        // by the main pipeline before this is called with a role.
        try {
            computeFabricIps = runHeadNodeCmd("sshpass -p rocks ssh -q -oStrictHostKeyChecking=no -l onos -p 8101 onos-fabric hosts -j | jq '.[] | select(.locations[].elementId | contains(\\\"${switchMac}\\\")) | .ipAddresses' | grep -o '\\\"${fabricIpPrefix}.[1-9][0-9]*.[0-9]*\\\"'",
                                              "-q").trim()
        }catch (exception) {
            // ONOS not reachable or no matching hosts: return what we have
            // (empty list) rather than failing the stage.
            return computeNames
        }
        computeFabricIps = "${computeFabricIps}".split()
        computeFabricIps = new JsonSlurperClassic().parseText("${computeFabricIps}")
        // Figure out which compute node connects to the switch: a node is
        // attached iff its fabric interface IP appears in ONOS's host list.
        for (name in computeNamesAll) {
            fabricIp = runComputeNodeCmd("${name}", "ip a | grep -o '${fabricIpPrefix}.[1-9][0-9]*.[0-9]*'").trim()
            if (fabricIp in computeFabricIps) {
                computeNames.add("${name}")
            }
        }
    }
    return computeNames
}
| 413 | |
| 414 | /** |
| 415 | * Connects all fabric switches and compute nodes to ONOS |
| 416 | */ |
def connectFabricAndComputeToOnos() {
    // Order matters here: kill agents, wipe ONOS, reconnect switches, verify
    // devices, then ping from compute nodes so ONOS learns them as hosts.
    // Kill existing switch connections
    runFabricSwitchCmdAll("./killit || echo no ofagentapp running")
    // Clean stale ONOS data
    runOnosCliCmd("onos-fabric", "8101", "onos", "rocks", "wipe-out -r -j please")
    // Connect switches to ONOS
    // (-qftn: quiet, no stdin, background-friendly so the agent keeps
    // running after the ssh session returns)
    for(int i=0; i < deployment_config.fabric_switches.size(); i++) {
        runFabricSwitchCmd("${deployment_config.fabric_switches[i].ip}",
                           "${deployment_config.fabric_switches[i].user}",
                           "${deployment_config.fabric_switches[i].pass}",
                           "./connect -bg 2>&1 > ${deployment_config.fabric_switches[i].ip}.log",
                           "-qftn")
    }
    // Verify ONOS has recognized the switches (poll up to 5 min for every
    // switch to appear with available=true).
    timeout(time: 5) {
        waitUntil {
            try {
                num = runHeadNodeCmd("\"sshpass -p rocks ssh -q -oStrictHostKeyChecking=no -l onos -p 8101 onos-fabric devices | grep available=true | wc -l\"").trim()
                return num.toInteger() == deployment_config.fabric_switches.size()
            } catch (exception) {
                return false
            }
        }
    }
    // Connect compute nodes to ONOS: a single ping toward the leaf gateway
    // (.1.254) makes each node's fabric interface visible to ONOS.
    runComputeNodeCmdAll("ping -c 1 ${fabricIpPrefix}.1.254", "-qftn")
    // Verify ONOS has recognized the hosts (>= because ONOS may also list
    // hosts other than the compute nodes).
    timeout(time: 5) {
        waitUntil {
            try {
                num = runHeadNodeCmd("\"sshpass -p rocks ssh -q -oStrictHostKeyChecking=no -l onos -p 8101 onos-fabric hosts | grep id= | wc -l\"").trim()
                return num.toInteger() >= deployment_config.compute_nodes.size()
            } catch (exception) {
                return false
            }
        }
    }
}