Commit 37dffdb7 authored by Tim Rozet, committed by Dan Radez

Adds some network parsing for baremetal deployments and other fixes

Changes include:
 - Fixes Intel Pod 2 inventory file
 - Checks for a DHCP server on the host and disables it
 - Adds a realistic, common network-settings file
 - Modifies baremetal deployments to bridge to correct interface
 - Adds private/storage network OVS bridges
 - Parses network-settings into valid network-environment variables
 - If certain network-settings are missing they will be auto-detected

Note: The settings applied at deployment currently include only the
admin/external networks.  Private/storage networks will be handled in
an upcoming patch.
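
A minimal sketch of the auto-detection mentioned above, assuming em1 as the jumphost admin interface and using the helper functions introduced in lib/common-functions.sh below:

    source lib/common-functions.sh
    admin_interface=em1                                      # assumed interface name
    admin_cidr=$(find_cidr $admin_interface)                 # e.g. 192.0.2.0/24
    admin_gateway=$(find_gateway $admin_interface)           # empty if em1 does not carry the default route
    admin_usable_range=$(find_usable_ip_range $admin_interface)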

JIRA: APEX-50

Change-Id: I0a1a86f37c08702a93fe167688c3149ba5573db4
Signed-off-by: Tim Rozet <trozet@redhat.com>
parent d71330a6
......@@ -101,13 +101,15 @@ rpm:
pushd ../ && git archive --format=tar --prefix=opnfv-apex-$(RPMVERS)/ HEAD > build/opnfv-apex.tar
tar -u --xform="s:stack/instack.qcow2:opnfv-apex-$(RPMVERS)/build/instack.qcow2:" --file=opnfv-apex.tar stack/instack.qcow2
tar -u --xform="s:instack.xml:opnfv-apex-$(RPMVERS)/build/instack.xml:" --file=opnfv-apex.tar instack.xml
tar -u --xform="s:baremetalbrbm_brbm1_0.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_0.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_0.xml
tar -u --xform="s:baremetalbrbm_brbm1_1.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_1.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_1.xml
tar -u --xform="s:baremetalbrbm_brbm1_2.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_2.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_2.xml
tar -u --xform="s:baremetalbrbm_brbm1_3.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_3.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_3.xml
tar -u --xform="s:baremetalbrbm_brbm1_4.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_4.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_4.xml
tar -u --xform="s:baremetalbrbm_brbm1_brbm2_brbm3_0.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_brbm2_brbm3_0.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_brbm2_brbm3_0.xml
tar -u --xform="s:baremetalbrbm_brbm1_brbm2_brbm3_1.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_brbm2_brbm3_1.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_brbm2_brbm3_1.xml
tar -u --xform="s:baremetalbrbm_brbm1_brbm2_brbm3_2.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_brbm2_brbm3_2.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_brbm2_brbm3_2.xml
tar -u --xform="s:baremetalbrbm_brbm1_brbm2_brbm3_3.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_brbm2_brbm3_3.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_brbm2_brbm3_3.xml
tar -u --xform="s:baremetalbrbm_brbm1_brbm2_brbm3_4.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_brbm2_brbm3_4.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_brbm2_brbm3_4.xml
tar -u --xform="s:brbm-net.xml:opnfv-apex-$(RPMVERS)/build/brbm-net.xml:" --file=opnfv-apex.tar brbm-net.xml
tar -u --xform="s:brbm1-net.xml:opnfv-apex-$(RPMVERS)/build/brbm1-net.xml:" --file=opnfv-apex.tar brbm1-net.xml
tar -u --xform="s:brbm2-net.xml:opnfv-apex-$(RPMVERS)/build/brbm2-net.xml:" --file=opnfv-apex.tar brbm2-net.xml
tar -u --xform="s:brbm3-net.xml:opnfv-apex-$(RPMVERS)/build/brbm3-net.xml:" --file=opnfv-apex.tar brbm3-net.xml
tar -u --xform="s:default-pool.xml:opnfv-apex-$(RPMVERS)/build/default-pool.xml:" --file=opnfv-apex.tar default-pool.xml
tar -u --xform="s:instackenv-virt.json:opnfv-apex-$(RPMVERS)/build/instackenv-virt.json:" --file=opnfv-apex.tar instackenv-virt.json
tar -u --xform="s:stack/overcloud-full-odl.qcow2:opnfv-apex-$(RPMVERS)/build/stack/overcloud-full.qcow2:" --file=opnfv-apex.tar stack/overcloud-full-odl.qcow2
......@@ -121,11 +123,11 @@ instack:
.PHONY: instack-clean
instack-clean:
rm -f instackenv-virt.json
rm -f baremetalbrbm_brbm1_0.xml
rm -f baremetalbrbm_brbm1_1.xml
rm -f baremetalbrbm_brbm1_2.xml
rm -f baremetalbrbm_brbm1_3.xml
rm -f baremetalbrbm_brbm1_4.xml
rm -f baremetalbrbm_brbm1_brbm2_brbm3_0.xml
rm -f baremetalbrbm_brbm1_brbm2_brbm3_1.xml
rm -f baremetalbrbm_brbm1_brbm2_brbm3_2.xml
rm -f baremetalbrbm_brbm1_brbm2_brbm3_3.xml
rm -f baremetalbrbm_brbm1_brbm2_brbm3_4.xml
rm -f instack.xml
.PHONY: iso
......
......@@ -7,6 +7,7 @@ rdo_images_uri=https://ci.centos.org/artifacts/rdo/images/liberty/delorean/stabl
vm_index=4
RDO_RELEASE=liberty
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null)
OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network"
# check for dependency packages
for i in rpm-build createrepo libguestfs-tools python-docutils bsdtar; do
......@@ -88,7 +89,7 @@ sudo ../ci/clean.sh
# and rebuild the bare undercloud VMs
ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
set -e
NODE_COUNT=5 NODE_CPU=2 NODE_MEM=8192 TESTENV_ARGS="--baremetal-bridge-names 'brbm brbm1'" instack-virt-setup
NODE_COUNT=5 NODE_CPU=2 NODE_MEM=8192 TESTENV_ARGS="--baremetal-bridge-names 'brbm brbm1 brbm2 brbm3'" instack-virt-setup
EOI
# let dhcp happen so we can get the ip
......@@ -163,24 +164,28 @@ fi
echo $'\nGenerating libvirt configuration'
for i in \$(seq 0 $vm_index); do
virsh dumpxml baremetalbrbm_brbm1_\$i | awk '/model type='\''virtio'\''/{c++;if(c==2){sub("model type='\''virtio'\''","model type='\''rtl8139'\''");c=0}}1' > baremetalbrbm_brbm1_\$i.xml
virsh dumpxml baremetalbrbm_brbm1_brbm2_brbm3_\$i | awk '/model type='\''virtio'\''/{c++;if(c==2){sub("model type='\''virtio'\''","model type='\''rtl8139'\''");c=0}}1' > baremetalbrbm_brbm1_brbm2_brbm3_\$i.xml
done
virsh dumpxml instack > instack.xml
virsh net-dumpxml brbm > brbm-net.xml
virsh net-dumpxml brbm1 > brbm1-net.xml
virsh net-dumpxml brbm2 > brbm2-net.xml
virsh net-dumpxml brbm3 > brbm3-net.xml
virsh pool-dumpxml default > default-pool.xml
EOI
# copy off the instack artifacts
echo "Copying instack files to build directory"
for i in $(seq 0 $vm_index); do
scp ${SSH_OPTIONS[@]} stack@localhost:baremetalbrbm_brbm1_${i}.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:baremetalbrbm_brbm1_brbm2_brbm3_${i}.xml .
done
scp ${SSH_OPTIONS[@]} stack@localhost:instack.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:brbm-net.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:brbm1-net.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:brbm2-net.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:brbm3-net.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:default-pool.xml .
# pull down the built images
......@@ -254,22 +259,6 @@ LIBGUESTFS_BACKEND=direct virt-customize --upload ../opendaylight-puppet-neutron
## END WORK AROUND
popd
# resize instack machine
echo "Checking if instack needs to be resized..."
instack_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a stack/instack.qcow2 |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
if [ "$instack_size" -lt 30 ]; then
qemu-img create -f qcow2 -o preallocation=metadata newinstack.qcow2 30G
LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 stack/instack.qcow2 newinstack.qcow2;
LIBGUESTFS_BACKEND=direct virt-customize -a newinstack.qcow2 --run-command 'xfs_growfs -d /dev/sda1 || true'
LIBGUESTFS_BACKEND=direct virt-sparsify newinstack.qcow2 stack/instack.qcow2
new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a stack/instack.qcow2 |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
if [ "$new_size" -lt 30 ]; then
echo "Error resizing instack machine, disk size is ${new_size}"
exit 1
else
echo "instack successfully resized"
fi
fi
# move and sanitize private keys from the instackenv.json file
mv stack/instackenv.json instackenv-virt.json
sed -i '/pm_password/c\ "pm_password": "INSERT_STACK_USER_PRIV_KEY",' instackenv-virt.json
......@@ -281,8 +270,8 @@ set -e
virsh destroy instack 2> /dev/null || echo -n ''
virsh undefine instack --remove-all-storage 2> /dev/null || echo -n ''
for i in \$(seq 0 $vm_index); do
virsh destroy baremetalbrbm_brbm1_\$i 2> /dev/null || echo -n ''
virsh undefine baremetalbrbm_brbm1_\$i --remove-all-storage 2> /dev/null || echo -n ''
virsh destroy baremetalbrbm_brbm1_brbm2_brbm3_\$i 2> /dev/null || echo -n ''
virsh undefine baremetalbrbm_brbm1_brbm2_brbm3_\$i --remove-all-storage 2> /dev/null || echo -n ''
done
EOI
......@@ -37,3 +37,34 @@ parameter_defaults:
ExternalInterfaceDefaultRoute: 192.168.37.1
EC2MetadataIp: 192.0.2.1
DnsServers: ["8.8.8.8","8.8.4.4"]
# ServiceNetMap:
# NeutronTenantNetwork: tenant
# CeilometerApiNetwork: internal_api
# MongoDbNetwork: internal_api
# CinderApiNetwork: internal_api
# CinderIscsiNetwork: storage
# GlanceApiNetwork: storage
# GlanceRegistryNetwork: internal_api
# KeystoneAdminApiNetwork: internal_api
# KeystonePublicApiNetwork: internal_api
# NeutronApiNetwork: internal_api
# HeatApiNetwork: internal_api
# NovaApiNetwork: internal_api
# NovaMetadataNetwork: internal_api
# NovaVncProxyNetwork: internal_api
# SwiftMgmtNetwork: storage_mgmt
# SwiftProxyNetwork: storage
# HorizonNetwork: internal_api
# MemcachedNetwork: internal_api
# RabbitMqNetwork: internal_api
# RedisNetwork: internal_api
# MysqlNetwork: internal_api
# CephClusterNetwork: storage_mgmt
# CephPublicNetwork: storage
# # Define which network will be used for hostname resolution
# ControllerHostnameResolveNetwork: internal_api
# ComputeHostnameResolveNetwork: internal_api
# BlockStorageHostnameResolveNetwork: internal_api
# ObjectStorageHostnameResolveNetwork: internal_api
# CephStorageHostnameResolveNetwork: storage
......@@ -106,7 +106,7 @@ resources:
members:
-
type: interface
name: nic2
name: nic3
# force the MAC address of the bridge to this interface
primary: true
......
Name: opnfv-apex
Version: 2.6
Version: 2.7
Release: %{release}
Summary: Scripts and Disk images for deployment
......@@ -33,12 +33,16 @@ install ci/clean.sh %{buildroot}%{_bindir}/opnfv-clean
mkdir -p %{buildroot}%{_var}/opt/opnfv/stack/
mkdir -p %{buildroot}%{_var}/opt/opnfv/nics/
mkdir -p %{buildroot}%{_var}/opt/opnfv/lib/
install lib/common-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
install build/instack.qcow2 %{buildroot}%{_var}/opt/opnfv/stack/
install build/instack.xml %{buildroot}%{_var}/opt/opnfv/
install build/baremetalbrbm_brbm1_*.xml %{buildroot}%{_var}/opt/opnfv/
install build/baremetalbrbm_brbm1_brbm2_brbm3_*.xml %{buildroot}%{_var}/opt/opnfv/
install build/brbm-net.xml %{buildroot}%{_var}/opt/opnfv/
install build/brbm1-net.xml %{buildroot}%{_var}/opt/opnfv/
install build/brbm2-net.xml %{buildroot}%{_var}/opt/opnfv/
install build/brbm3-net.xml %{buildroot}%{_var}/opt/opnfv/
install build/default-pool.xml %{buildroot}%{_var}/opt/opnfv/
install build/network-environment.yaml %{buildroot}%{_var}/opt/opnfv/
install build/nics/controller.yaml %{buildroot}%{_var}/opt/opnfv/nics/
......@@ -55,15 +59,19 @@ install docs/installation-instructions.html %{buildroot}%{_docdir}/opnfv/
install docs/release-notes/index.rst %{buildroot}%{_docdir}/opnfv/release-notes.rst
install docs/release-notes.html %{buildroot}%{_docdir}/opnfv/
install config/deploy/deploy_settings.yaml %{buildroot}%{_docdir}/opnfv/deploy_settings.yaml.example
install config/deploy/network/network_settings.yaml %{buildroot}%{_docdir}/opnfv/network_settings.yaml.example
%files
%{_bindir}/opnfv-deploy
%{_bindir}/opnfv-clean
%{_var}/opt/opnfv/lib/common-functions.sh
%{_var}/opt/opnfv/stack/instack.qcow2
%{_var}/opt/opnfv/instack.xml
%{_var}/opt/opnfv/baremetalbrbm_brbm1_*.xml
%{_var}/opt/opnfv/baremetalbrbm_brbm1_brbm2_brbm3_*.xml
%{_var}/opt/opnfv/brbm-net.xml
%{_var}/opt/opnfv/brbm1-net.xml
%{_var}/opt/opnfv/brbm2-net.xml
%{_var}/opt/opnfv/brbm3-net.xml
%{_var}/opt/opnfv/default-pool.xml
%{_var}/opt/opnfv/network-environment.yaml
%{_var}/opt/opnfv/nics/controller.yaml
......@@ -77,9 +85,11 @@ install config/deploy/deploy_settings.yaml %{buildroot}%{_docdir}/opnfv/deploy_s
%doc %{_docdir}/opnfv/release-notes.rst
%doc %{_docdir}/opnfv/release-notes.html
%doc %{_docdir}/opnfv/deploy_settings.yaml.example
%doc %{_docdir}/opnfv/network_settings.yaml.example
%changelog
* Tue Dec 20 2015 Tim Rozet <trozet@redhat.com> - 2.7-1
- Modifies networks to include OPNFV private/storage networks
* Tue Dec 15 2015 Dan Radez <dradez@redhat.com> - 2.6-1
- Added deploy settings for flat network config
- cleaned up files that don't need to be in the rpm
......
......@@ -4,7 +4,7 @@
#author: Dan Radez (dradez@redhat.com)
#
vm_index=4
ovs_bridges="brbm brbm1 brbm2 brbm3"
# Clean off instack VM
virsh destroy instack 2> /dev/null || echo -n ''
virsh undefine instack --remove-all-storage 2> /dev/null || echo -n ''
......@@ -13,20 +13,18 @@ rm -f /var/lib/libvirt/images/instack.qcow2 2> /dev/null
# Clean off baremetal VMs in case they exist
for i in $(seq 0 $vm_index); do
virsh destroy baremetalbrbm_brbm1_$i 2> /dev/null || echo -n ''
virsh undefine baremetalbrbm_brbm1_$i --remove-all-storage 2> /dev/null || echo -n ''
virsh vol-delete baremetalbrbm_brbm1_${i}.qcow2 --pool default 2> /dev/null
rm -f /var/lib/libvirt/images/baremetalbrbm_brbm1_${i}.qcow2 2> /dev/null
virsh destroy baremetalbrbm_brbm1_brbm2_brbm3_$i 2> /dev/null || echo -n ''
virsh undefine baremetalbrbm_brbm1_brbm2_brbm3_$i --remove-all-storage 2> /dev/null || echo -n ''
virsh vol-delete baremetalbrbm_brbm1_brbm2_brbm3_${i}.qcow2 --pool default 2> /dev/null
rm -f /var/lib/libvirt/images/baremetalbrbm_brbm1_brbm2_brbm3_${i}.qcow2 2> /dev/null
done
# Clean off brbm bridges
virsh net-destroy brbm 2> /dev/null
virsh net-undefine brbm 2> /dev/null
vs-vsctl del-br brbm 2> /dev/null
virsh net-destroy brbm1 2> /dev/null
virsh net-undefine brbm1 2> /dev/null
vs-vsctl del-br brbm1 2> /dev/null
# Clean off created bridges
for bridge in ${ovs_bridges}; do
virsh net-destroy ${bridge} 2> /dev/null
virsh net-undefine ${bridge} 2> /dev/null
ovs-vsctl del-br ${bridge} 2> /dev/null
done
# clean pub keys from root's auth keys
sed -i '/stack@instack.localdomain/d' /root/.ssh/authorized_keys
......
This diff is collapsed.
global_params:
ha_enabled: true
network_isolation_settings: network/network-environment-example.yaml
deploy_options:
# instack_ip
# IP address given to instack on the provisioning network
instack_ip: 192.0.2.1
# CIDR of provisioning network
provisioning_cidr: 192.0.2.0/24
# gateway IP of provisioning network
provisioning_gateway: 192.0.2.1
# IP pool start used for provisioning overcloud nodes
provisioning_dhcp_start: 192.0.2.5
# IP pool end used for provisioning overcloud nodes
provisioning_dhcp_end: 192.0.2.24
# IP pool used for inspecting overcloud nodes on the provisioning network
provisioning_inspection_iprange: 192.0.2.100,192.0.2.124
sdn_controller: opendaylight
tacker: false
congress: false
# CIDR used for the external network
ext_net_cidr: 192.168.37.0/24
# Allocation pools for floating ip addresses on the ext net
ext_allocation_pool_start: 192.168.37.50
ext_allocation_pool_end: 192.168.37.99
# Default Gateway for External Network
ext_gateway: 192.168.37.1
resource_registry:
OS::TripleO::BlockStorage::Net::SoftwareConfig: /home/stack/nic-configs/cinder-storage.yaml
OS::TripleO::Compute::Net::SoftwareConfig: /home/stack/nic-configs/compute.yaml
OS::TripleO::Controller::Net::SoftwareConfig: /home/stack/nic-configs/controller.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: /home/stack/nic-configs/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: /home/stack/nic-configs/ceph-storage.yaml
parameter_defaults:
# Customize all these values to match the local environment
InternalApiNetCidr: 172.17.0.0/24
StorageNetCidr: 172.18.0.0/24
StorageMgmtNetCidr: 172.19.0.0/24
TenantNetCidr: 172.16.0.0/24
ExternalNetCidr: 10.1.2.0/24
# CIDR subnet mask length for provisioning network
ControlPlaneSubnetCidr: 24
InternalApiAllocationPools: [{'start': '172.17.0.10', 'end': '172.17.0.200'}]
StorageAllocationPools: [{'start': '172.18.0.10', 'end': '172.18.0.200'}]
StorageMgmtAllocationPools: [{'start': '172.19.0.10', 'end': '172.19.0.200'}]
TenantAllocationPools: [{'start': '172.16.0.10', 'end': '172.16.0.200'}]
# Use an External allocation pool which will leave room for floating IPs
ExternalAllocationPools: [{'start': '10.1.2.10', 'end': '10.1.2.50'}]
# Set to the router gateway on the external network
ExternalInterfaceDefaultRoute: 10.1.2.1
# Gateway router for the provisioning network (or Undercloud IP)
ControlPlaneDefaultRoute: 192.0.2.254
# Generally the IP of the Undercloud
EC2MetadataIp: 192.0.2.1
# Define the DNS servers (maximum 2) for the overcloud nodes
DnsServers: ["8.8.8.8","8.8.4.4"]
InternalApiNetworkVlanID: 201
StorageNetworkVlanID: 202
StorageMgmtNetworkVlanID: 203
TenantNetworkVlanID: 204
ExternalNetworkVlanID: 100
# May set to br-ex if using floating IPs only on native VLAN on bridge br-ex
NeutronExternalNetworkBridge: "''"
# Customize bonding options if required (ignored if bonds are not used)
BondInterfaceOvsOptions:
"bond_mode=balance-tcp lacp=active other-config:lacp-fallback-ab=true"
ServiceNetMap:
NeutronTenantNetwork: tenant
CeilometerApiNetwork: internal_api
MongoDbNetwork: internal_api
CinderApiNetwork: internal_api
CinderIscsiNetwork: storage
GlanceApiNetwork: storage
GlanceRegistryNetwork: internal_api
KeystoneAdminApiNetwork: internal_api
KeystonePublicApiNetwork: internal_api
NeutronApiNetwork: internal_api
HeatApiNetwork: internal_api
NovaApiNetwork: internal_api
NovaMetadataNetwork: internal_api
NovaVncProxyNetwork: internal_api
SwiftMgmtNetwork: storage_mgmt
SwiftProxyNetwork: storage
HorizonNetwork: internal_api
MemcachedNetwork: internal_api
RabbitMqNetwork: internal_api
RedisNetwork: internal_api
MysqlNetwork: internal_api
CephClusterNetwork: storage_mgmt
CephPublicNetwork: storage
# Define which network will be used for hostname resolution
ControllerHostnameResolveNetwork: internal_api
ComputeHostnameResolveNetwork: internal_api
BlockStorageHostnameResolveNetwork: internal_api
ObjectStorageHostnameResolveNetwork: internal_api
CephStorageHostnameResolveNetwork: storage
# Defines Network Environment for a Baremetal Deployment
# Any values missing will be auto-detected on the jumphost
admin_network:
enabled: true
network_type: bridged
bridged_interface: ''
bond_interfaces: ''
vlan: native
usable_ip_range: 192.0.2.11,192.0.2.99
gateway: 192.0.2.1
provisioner_ip: 192.0.2.1
cidr: 192.0.2.0/24
dhcp_range: 192.0.2.2,192.0.2.10
introspection_range: 192.0.2.100,192.0.2.120
private_network:
enabled: false
public_network:
enabled: true
network_type: ''
bridged_interface: ''
cidr: 192.168.37.0/24
gateway: 192.168.37.1
floating_ip_range: 192.168.37.200,192.168.37.220
usable_ip_range: 192.168.37.10,192.168.37.199
provisioner_ip: 192.168.37.1
storage_network:
enabled: false
#admin_network: #Required network, other networks can collapse into this network if not enabled
# enabled: true
# network_type: bridged #Indicates if this network will be bridged to an interface, or to a bond
# bridged_interface: '' #Interface to bridge to for installer VM
# bond_interfaces: '' #Interfaces to create bond with for installer VM
# vlan: native #VLAN tag to use, native means none
# usable_ip_range: 192.0.2.11,192.0.2.99 #Usable ip range, if empty entire range is usable, ex. 192.168.1.10,192.168.1.20
# gateway: 192.0.2.1 #Gateway (only needed when public_network is disabled), if empty it is auto-detected
# provisioner_ip: 192.0.2.1 #installer VM IP, if empty it is the next available IP in the admin subnet
# cidr: 192.0.2.0/24 #subnet in CIDR format 192.168.1.0/24, if empty it will be auto-detected
# dhcp_range: 192.0.2.2,192.0.2.10 #dhcp range for the admin network, if empty it will be automatically provisioned
# introspection_range: 192.0.2.100,192.0.2.120 #Range used for introspection phase (examining nodes)
#private_network: #Network for internal API traffic for O/S services and internal tenant traffic
# enabled: false #If disabled, internal api traffic will collapse to admin_network
#public_network: #Network for external API traffic and external tenant traffic
# enabled: true #If disabled, public_network traffic will collapse to admin network
# network_type: ''
# bridged_interface: ''
# cidr: 192.168.37.0/24
# gateway: 192.168.37.1
# floating_ip_range: 192.168.37.200,192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron
# usable_ip_range: 192.168.37.10,192.168.37.199 #Usable IP range on the public network, usually this is a shared subnet
# provisioner_ip: 192.168.37.1
#storage_network: #Network for Ceph storage traffic
# enabled: false #If disabled, storage_network traffic will collapse to admin network
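
A hedged sketch of how one of these values might be consumed, falling back to auto-detection on the jumphost when it is left empty; the awk extraction and the em1 interface name are illustrative assumptions, not the project's actual parser:

    # read admin_network's cidr from the settings file (empty if unset)
    admin_cidr=$(awk '/^admin_network:/{in_admin=1} in_admin && /cidr:/{print $2; exit}' network_settings.yaml)
    if [ -z "$admin_cidr" ]; then
      admin_cidr=$(find_cidr em1)   # fall back to detecting it from the assumed admin interface
    fi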
......@@ -5,7 +5,7 @@ nodes:
ipmi_user: root
ipmi_pass: root
cpus: 2
memory: 2048
memory: 8192
disk: 40
arch: "x86_64"
capabilities: "profile:control"
......@@ -15,27 +15,27 @@ nodes:
ipmi_user: root
ipmi_pass: root
cpus: 2
memory: 2048
memory: 8192
disk: 40
arch: "x86_64"
capabilities: "profile:control"
node3:
mac_address: "00:1e:67:4f:cc:f1"
mac_address: "00:1e:67:4f:cc:0b"
ipmi_ip: 10.4.7.4
ipmi_user: root
ipmi_pass: root
cpus: 2
memory: 2048
memory: 8192
disk: 40
arch: "x86_64"
capabilities: "profile:control"
node4:
mac_address: "00:1e:67:4f:cc:0b"
mac_address: "00:1e:67:4f:cc:f1"
ipmi_ip: 10.4.7.5
ipmi_user: root
ipmi_pass: root
cpus: 2
memory: 2048
memory: 8192
disk: 40
arch: "x86_64"
capabilities: "profile:compute"
......@@ -45,7 +45,7 @@ nodes:
ipmi_user: root
ipmi_pass: root
cpus: 2
memory: 2048
memory: 8192
disk: 40
arch: "x86_64"
capabilities: "profile:compute"
#!/usr/bin/env bash
# Common Functions used by OPNFV Apex
# author: Tim Rozet (trozet@redhat.com)
##find ip of interface
##params: interface name
function find_ip {
ip addr show $1 | grep -Eo '^\s+inet\s+[\.0-9]+' | awk '{print $2}'
}
##finds subnet of ip and netmask
##params: ip, netmask
function find_subnet {
IFS=. read -r i1 i2 i3 i4 <<< "$1"
IFS=. read -r m1 m2 m3 m4 <<< "$2"
printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))"
}
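##example (illustrative): find_subnet 192.168.1.130 255.255.255.0  ->  192.168.1.0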
##verify subnet has at least n IPs
##params: subnet mask, n IPs
function verify_subnet_size {
IFS=. read -r i1 i2 i3 i4 <<< "$1"
num_ips_required=$2
##this function assumes you would never need more than 254
##we check here to make sure
if [ "$num_ips_required" -ge 254 ]; then
echo -e "\n\n${red}ERROR: allocating more than 254 IPs is unsupported...Exiting${reset}\n\n"
return 1
fi
##we just return if 3rd octet is not 255
##because we know the subnet is big enough
if [ "$i3" -ne 255 ]; then
return 0
elif [ $((254-$i4)) -ge "$num_ips_required" ]; then
return 0
else
echo -e "\n\n${red}ERROR: Subnet is too small${reset}\n\n"
return 1
fi
}
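##example (illustrative): verify_subnet_size 255.255.255.0 100 returns 0, since a /24 holds up to 254 IPs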
##finds last usable ip (broadcast minus 1) of a subnet from an IP and netmask
## Warning: This function only works for IPv4 at the moment.
##params: ip, netmask
function find_last_ip_subnet {
IFS=. read -r i1 i2 i3 i4 <<< "$1"
IFS=. read -r m1 m2 m3 m4 <<< "$2"
IFS=. read -r s1 s2 s3 s4 <<< "$((i1 & m1)).$((i2 & m2)).$((i3 & m3)).$((i4 & m4))"
printf "%d.%d.%d.%d\n" "$((255 - $m1 + $s1))" "$((255 - $m2 + $s2))" "$((255 - $m3 + $s3))" "$((255 - $m4 + $s4 - 1))"
}
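##example (illustrative): find_last_ip_subnet 192.168.1.10 255.255.255.0  ->  192.168.1.254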
##increments subnet by a value
##params: ip, value
##assumes low value
function increment_subnet {
IFS=. read -r i1 i2 i3 i4 <<< "$1"
printf "%d.%d.%d.%d\n" "$i1" "$i2" "$i3" "$((i4 | $2))"
}
##finds netmask of interface
##params: interface
##returns long format 255.255.x.x
function find_netmask {
ifconfig $1 | grep -Eo 'netmask\s+[\.0-9]+' | awk '{print $2}'
}
##finds short netmask of interface
##params: interface
##returns short format, ex: /21
function find_short_netmask {
echo "/$(ip addr show $1 | grep -Eo '^\s+inet\s+[\/\.0-9]+' | awk '{print $2}' | cut -d / -f2)"
}
##increments next IP
##params: ip
##assumes a /24 subnet
function next_ip {
baseaddr="$(echo $1 | cut -d. -f1-3)"
lsv="$(echo $1 | cut -d. -f4)"
if [ "$lsv" -ge 254 ]; then
return 1
fi
((lsv++))
echo $baseaddr.$lsv
}
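##example (illustrative): next_ip 192.0.2.10  ->  192.0.2.11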
##subtracts a value from an IP address
##params: last ip, ip_count
##assumes ip_count is less than the last octet of the address
subtract_ip() {
IFS=. read -r i1 i2 i3 i4 <<< "$1"
ip_count=$2
if [ $i4 -lt $ip_count ]; then
echo -e "\n\n${red}ERROR: Can't subtract $ip_count from IP address $1 Exiting${reset}\n\n"
exit 1
fi
printf "%d.%d.%d.%d\n" "$i1" "$i2" "$i3" "$((i4 - $ip_count ))"
}
##check if IP is in use
##params: ip
##ping ip to get arp entry, then check arp
function is_ip_used {
ping -c 5 $1 > /dev/null 2>&1
arp -n | grep "$1 " | grep -iv incomplete > /dev/null 2>&1
}
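##example (illustrative): is_ip_used 192.0.2.5 && echo "192.0.2.5 is already taken"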
##find next usable IP
##params: ip
function next_usable_ip {
new_ip=$(next_ip $1)
while [ "$new_ip" ]; do
if ! is_ip_used $new_ip; then
echo $new_ip
return 0
fi
new_ip=$(next_ip $new_ip)
done
return 1
}
##increment ip by value
##params: ip, amount to increment by
##increment_ip $next_private_ip 10
function increment_ip {
baseaddr="$(echo $1 | cut -d. -f1-3)"
lsv="$(echo $1 | cut -d. -f4)"
incrval=$2
lsv=$((lsv+incrval))
if [ "$lsv" -ge 254 ]; then
return 1
fi
echo $baseaddr.$lsv
}
##finds gateway on system
##params: interface to validate gateway on (optional)
##find_gateway em1
function find_gateway {
local gw gw_interface
gw=$(ip route | grep default | awk '{print $3}')
gw_interface=$(ip route get $gw | awk '{print $3}')
if [ -n "$1" ]; then
if [ "$gw_interface" == "$1" ]; then
echo ${gw}
fi
fi
}
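##example (illustrative): find_gateway em1 prints the default gateway only when it is reachable through em1 (em1 is an assumed interface name)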
##finds subnet in CIDR notation for interface
##params: interface to find CIDR
function find_cidr {
local cidr network ip netmask short_mask
ip=$(find_ip $1)
netmask=$(find_netmask $1)
if [[ -z "$ip" || -z "$netmask" ]]; then
return 1
fi
network=$(find_subnet ${ip} ${netmask})
short_mask=$(find_short_netmask $1)
if [[ -z "$network" || -z "$short_mask" ]]; then
return 1
fi
cidr="${network}${short_mask}"
echo ${cidr}
}
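##example (illustrative): find_cidr em1  ->  192.168.1.0/24 when em1 is addressed as 192.168.1.10/24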
##finds block of usable IP addresses for an interface
##currently just returns the range in the correct format:
##skips the first 20 IPs and leaves 20 IPs at the end of the subnet (for floating ips, etc)
##params: interface to find IP
function find_usable_ip_range {
local interface_ip subnet_mask first_block_ip last_block_ip
interface_ip=$(find_ip $1)
subnet_mask=$(find_netmask $1)
if [[ -z "$interface_ip" || -z "$subnet_mask" ]]; then
return 1
fi
interface_ip=$(increment_ip ${interface_ip} 20)
first_block_ip=$(next_usable_ip ${interface_ip})
if [ -z "$first_block_ip" ]; then
return 1
fi
last_block_ip=$(find_last_ip_subnet ${interface_ip} ${subnet_mask})
if [ -z "$last_block_ip" ]; then
return 1
else
last_block_ip=$(subtract_ip ${last_block_ip} 20)
echo "${first_block_ip},${last_block_ip}"
fi
}
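##example (illustrative): find_usable_ip_range em1  ->  192.168.1.31,192.168.1.234 when em1 is 192.168.1.10/24 and 192.168.1.31 is free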
##generates usable IP range in correct format based on CIDR
##assumes the first 20 IPs are used (by instack or otherwise)
##params: cidr
function generate_usable_ip_range {
local first_ip first_block_ip last_block_ip
first_ip=$(ipcalc -nb $1 | grep HostMin: | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
first_block_ip=$(increment_ip ${first_ip} 20)
last_block_ip=$(ipcalc -nb $1 | grep HostMax: | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
if [[ -z "$first_block_ip" || -z "$last_block_ip" ]]; then
return 1
else
last_block_ip=$(subtract_ip ${last_block_ip} 20)
echo "${first_block_ip},${last_block_ip}"
fi
}
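##example (illustrative, assuming an ipcalc that reports HostMin/HostMax): generate_usable_ip_range 192.168.1.0/24  ->  192.168.1.21,192.168.1.234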
##find the instack IP address
##finds first usable IP on subnet
##params: interface
function find_provisioner_ip {
local interface_ip
interface_ip=$(find_ip $1)
if [ -z "$interface_ip" ]; then