Commit 35e82726 authored by Steven Pisarski

Merge branch 'master' of https://gerrit.opnfv.org/gerrit/apex

# Conflicts:
#	lib/python/apex/network_settings.py
#	tests/test_apex_python_utils_py.py
parents e2c20f0f 234ed195
......@@ -20,8 +20,9 @@ IRC: Server:freenode.net Channel:#opnfv-apex
Repository: apex
Committers:
dradez@redhat.com
trozet@redhat.com
michapma@redhat.com
Dan Radez (dradez@redhat.com)
Tim Rozet (trozet@redhat.com)
Michael Chapman (michapma@redhat.com)
Feng Pan (fpan@redhat.com)
Link to TSC approval of the project: http://ircbot.wl.linuxfoundation.org/meetings/opnfv-meeting/2015/opnfv-meeting.2015-07-14-14.00.html
......@@ -6,6 +6,8 @@ parameters:
OvercloudControlFlavor: control
OvercloudComputeFlavor: compute
ControllerEnableSwiftStorage: false
NeutronEnableForceMetadata: true
NeutronEnableDHCPMetadata: true
# CloudDomain:
EnableSahara: false
EnableTacker: true
......@@ -22,3 +24,5 @@ parameters:
key: 'os_compute_api:servers:show:host_status'
value: 'rule:admin_or_owner'
parameter_defaults:
CeilometerStoreEvents: true
......@@ -46,7 +46,7 @@ pushd images > /dev/null
dpdk_pkg_str=''
for package in ${dpdk_rpms[@]}; do
curl -O "$dpdk_uri_base/$package"
wget "$dpdk_uri_base/$package"
dpdk_pkg_str+=" --upload $package:/root/dpdk_rpms"
done
......@@ -58,21 +58,13 @@ git archive --format=tar.gz --prefix=congress/ origin/stable/mitaka > ../puppet-
popd > /dev/null
# create fd.io yum repo file
cat > /tmp/fdio-master.repo << EOF
[fdio-master]
name=fd.io master branch latest merge
baseurl=https://nexus.fd.io/content/repositories/fd.io.master.centos7/
enabled=1
gpgcheck=0
EOF
cat > /tmp/tacker.repo << EOF
[tacker-trozet]
name=Tacker RPMs built from https://github.com/trozet/ tacker repositories
baseurl=http://radez.fedorapeople.org/tacker/
enabled=1
gpgcheck=0
EOF
#cat > /tmp/fdio-master.repo << EOF
#[fdio-master]
#name=fd.io master branch latest merge
#baseurl=https://nexus.fd.io/content/repositories/fd.io.master.centos7/
#enabled=1
#gpgcheck=0
#EOF
# tar up the fd.io module
rm -rf puppet-fdio
......@@ -104,7 +96,6 @@ popd > /dev/null
# install fd.io yum repo and packages
# upload puppet fdio
# git clone vsperf into the overcloud image
# upload tacker repo and install the packages
# upload the tacker puppet module and untar it
LIBGUESTFS_BACKEND=direct virt-customize \
--upload ../opnfv-puppet-tripleo.tar.gz:/etc/puppet/modules \
......@@ -127,19 +118,23 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--run-command "cd /etc/puppet/modules/ && tar xzf puppet-congress.tar.gz" \
--run-command "cd /usr/lib/python2.7/site-packages/congress/datasources && curl -O $doctor_driver" \
--run-command "sed -i \"s/'--detailed-exitcodes',/'--detailed-exitcodes','-l','syslog','-l','console',/g\" /var/lib/heat-config/hooks/puppet" \
--upload /tmp/fdio-master.repo:/etc/yum.repos.d/fdio-master.repo \
--install unzip,vpp,honeycomb \
--upload ../vpp-bin.tar.gz:/root \
--run-command "cd /root && tar zxvf vpp-bin.tar.gz" \
--run-command "yum install -y /root/vpp-bin/*.rpm" \
--run-command "tar zxvf /root/vpp-bin/vpp_papi*.tar.gz -C /" \
--install unzip \
--upload puppet-fdio.tar.gz:/etc/puppet/modules \
--run-command "cd /etc/puppet/modules && tar xzf puppet-fdio.tar.gz" \
--upload vsperf.tar.gz:/var/opt \
--run-command "cd /var/opt && tar xzf vsperf.tar.gz" \
--upload /tmp/tacker.repo:/etc/yum.repos.d/ \
--install "python-tackerclient" \
--upload ../noarch/openstack-tacker-2015.2-1.noarch.rpm:/root/ \
--install /root/openstack-tacker-2015.2-1.noarch.rpm \
--upload ../noarch/python-tackerclient-2015.2-1.trozet.noarch.rpm:/root/ \
--install /root/python-tackerclient-2015.2-1.trozet.noarch.rpm \
--upload ../noarch/openstack-tacker-2015.2-1.trozet.noarch.rpm:/root/ \
--install /root/openstack-tacker-2015.2-1.trozet.noarch.rpm \
--upload puppet-tacker.tar.gz:/etc/puppet/modules/ \
--run-command "cd /etc/puppet/modules/ && tar xzf puppet-tacker.tar.gz" \
--run-command "yum install -y https://dl.dropboxusercontent.com/u/7079970/rabbitmq-server-3.6.3-5.el7ost.noarch.rpm" \
--run-command "pip install python-senlinclient" \
-a overcloud-full_build.qcow2
mv -f overcloud-full_build.qcow2 overcloud-full.qcow2
......
......@@ -19,8 +19,8 @@ cp -f overcloud-full.qcow2 overcloud-full-onos_build.qcow2
#######################################
# upgrade ovs into ovs 2.5.90 with NSH function
curl -L -O ${onos_ovs_uri}/package_ovs_rpm2.tar.gz
tar -xzf package_ovs_rpm2.tar.gz
curl -L -O ${onos_ovs_uri}/${onos_ovs_pkg}
tar -xzf ${onos_ovs_pkg}
LIBGUESTFS_BACKEND=direct virt-customize --upload ${ovs_kmod_rpm_name}:/root/ \
--run-command "yum install -y /root/${ovs_kmod_rpm_name}" \
--upload ${ovs_rpm_name}:/root/ \
......
......@@ -20,8 +20,8 @@ cp -f overcloud-full-opendaylight.qcow2 overcloud-full-opendaylight-sfc_build.qc
# upgrade ovs into ovs 2.5.90 with NSH function
if ! [[ -f "$ovs_rpm_name" && -f "$ovs_kmod_rpm_name" ]]; then
curl -L -O ${onos_ovs_uri}/package_ovs_rpm2.tar.gz
tar -xzf package_ovs_rpm2.tar.gz
curl -L -O ${onos_ovs_uri}/${onos_ovs_pkg}
tar -xzf ${onos_ovs_pkg}
fi
LIBGUESTFS_BACKEND=direct virt-customize --upload ${ovs_kmod_rpm_name}:/root/ \
......
......@@ -18,6 +18,13 @@ cp -f overcloud-full.qcow2 overcloud-full-opendaylight_build.qcow2
##### Adding OpenDaylight to overcloud #####
###############################################
# tar up fdio networking-odl
rm -rf fds
git clone https://gerrit.opnfv.org/gerrit/fds
pushd fds > /dev/null
tar -czvf ../networking-odl.tar.gz networking-odl
popd > /dev/null
# Beryllium Repo
cat > /tmp/opendaylight.repo << EOF
[opendaylight-4-release]
......@@ -39,10 +46,20 @@ EOF
# SDNVPN - Copy tunnel setup script
wget https://raw.githubusercontent.com/openstack/fuel-plugin-opendaylight/brahmaputra-sr2/deployment_scripts/puppet/modules/opendaylight/templates/setup_TEPs.py
# tar up the honeycomb module
rm -rf puppet-honeycomb
git clone https://github.com/trozet/puppet-honeycomb
pushd puppet-honeycomb > /dev/null
git archive --format=tar.gz --prefix=honeycomb/ HEAD > ../puppet-honeycomb.tar.gz
popd > /dev/null
# install ODL packages
# install Jolokia for ODL HA
# Patch in OPNFV custom puppet-tripleO
# install Honeycomb
# install Honeycomb puppet module
LIBGUESTFS_BACKEND=direct virt-customize \
--upload networking-odl.tar.gz:/root/ \
--upload /tmp/opendaylight_boron.repo:/etc/yum.repos.d/opendaylight.repo \
--run-command "yum install --downloadonly --downloaddir=/root/boron/ opendaylight" \
--upload /tmp/opendaylight.repo:/etc/yum.repos.d/opendaylight.repo \
......@@ -50,6 +67,9 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--install https://github.com/michaeltchapman/networking_rpm/raw/master/openstack-neutron-bgpvpn-2015.2-1.el7.centos.noarch.rpm \
--run-command "wget https://github.com/rhuss/jolokia/releases/download/v1.3.3/jolokia-1.3.3-bin.tar.gz -O /tmp/jolokia-1.3.3-bin.tar.gz" \
--run-command "tar -xvf /tmp/jolokia-1.3.3-bin.tar.gz -C /opt/opendaylight/system/org" \
--run-command "yum -y install https://github.com/marosmars/files/raw/master/honeycomb-1.0.0-99.noarch.rpm" \
--upload puppet-honeycomb.tar.gz:/etc/puppet/modules \
--run-command "cd /etc/puppet/modules && tar xzf puppet-honeycomb.tar.gz" \
--upload ./setup_TEPs.py:/tmp \
-a overcloud-full-opendaylight_build.qcow2
......
......@@ -2,7 +2,7 @@
Name: openstack-tacker
Version: 2015.2
Release: 1
Release: 1.trozet
Summary: OpenStack servicevm/device manager
Group: Applications/Internet
......
......@@ -22,7 +22,7 @@ https://wiki.opnfv.org/apex
%setup -q
%build
rst2html docs/installation-instructions/index.rst docs/installation-instructions.html
rst2html docs/installationprocedure/index.rst docs/installation-instructions.html
rst2html docs/release-notes/release-notes.rst docs/release-notes.html
%global __python %{__python3}
......@@ -131,11 +131,11 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%doc %{_docdir}/opnfv/inventory.yaml.example
%changelog
* Thu Aug 1 2016 Tim Rozet <trozet@redhat.com> - 3.0-11
* Mon Aug 1 2016 Tim Rozet <trozet@redhat.com> - 3.0-11
- Add nosdn fdio scenarios.
* Tue Jul 5 2016 Dan Radez <dradez@redhat.com> - 3.0-10
- Adding functions.sh files
* Thu Jun 15 2016 Tim Rozet <trozet@redhat.com> - 3.0-9
* Wed Jun 15 2016 Tim Rozet <trozet@redhat.com> - 3.0-9
- Add fdio scenarios.
* Tue Jun 14 2016 Feng Pan <fpan@redhat.com> - 3.0-8
- Add network_settings_v6.yaml
......
%define debug_package %{nil}
Name: python-tackerclient
Version: 2015.2
Release: 1.trozet
Summary: CLI and Client Library for OpenStack Networking
Group: Applications/Internet
License: Apache 2.0
URL: https://wiki.openstack.org/wiki/Tacker/Installation
Source0: python-tackerclient.tar.gz
BuildArch: noarch
BuildRequires: python-setuptools
#Requires: stevedore>=1.5.0 http oslo.config>=1.11.0 oslo.messaging!=1.17.0!=1.17.1>=1.16.0 oslo.rootwrap>=2.0.0 python-novaclient>=2.22.0
%description
CLI and Client Library for OpenStack Networking
%prep
%setup -q
%build
rm requirements.txt
#/usr/bin/python setup.py build
%install
/usr/bin/python setup.py install --prefix=%{buildroot} --install-lib=%{buildroot}/usr/lib/python2.7/site-packages
#rm -rf %{buildroot}/usr/lib/python2.7/site-packages/tacker/tests
%files
/bin/tacker
/usr/lib/python2.7/site-packages/tackerclient/*
/usr/lib/python2.7/site-packages/python_tackerclient-*
%changelog
......@@ -22,14 +22,6 @@ pushd opnfv-tht > /dev/null
git archive --format=tar.gz --prefix=openstack-tripleo-heat-templates/ HEAD > ../opnfv-tht.tar.gz
popd > /dev/null
cat > /tmp/tacker.repo << EOF
[tacker-trozet]
name=Tacker RPMs built from https://github.com/trozet/ tacker repositories
baseurl=http://radez.fedorapeople.org/tacker/
enabled=1
gpgcheck=0
EOF
pushd images > /dev/null
# installing forked opnfv-tht
# enabling ceph OSDs to live on the controller
......@@ -60,8 +52,12 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--run-command "sed -i '/PASSWORD_NAMES =/a\\ \"OVERCLOUD_TACKER_PASSWORD\",' /usr/lib/python2.7/site-packages/tripleoclient/utils.py" \
--run-command "sed -i '/AodhPassword/a\\ parameters\[\x27TackerPassword\x27\] = passwords\[\x27OVERCLOUD_TACKER_PASSWORD\x27\]' /usr/lib/python2.7/site-packages/tripleoclient/v1/overcloud_deploy.py" \
--run-command "sed -i '/^SERVICES/a\ \x27tacker\x27: {\x27description\x27: \x27Tacker Service\x27, \x27type\x27: \x27servicevm\x27, \x27path\x27: \x27/\x27, \x27port\x27: 1789 },' /usr/lib/python2.7/site-packages/os_cloud_config/keystone.py" \
--upload /tmp/tacker.repo:/etc/yum.repos.d/ \
--install "python-tackerclient" \
--upload ../noarch/python-tackerclient-2015.2-1.trozet.noarch.rpm:/root/ \
--install /root/python-tackerclient-2015.2-1.trozet.noarch.rpm \
--install "python2-aodhclient" \
--install "openstack-heat-engine" \
--install "openstack-heat-api-cfn" \
--install "openstack-heat-api" \
-a undercloud_build.qcow2
# Add custom IPA to allow kernel params
......
......@@ -12,16 +12,17 @@ rdo_images_uri=https://ci.centos.org/artifacts/rdo/images/mitaka/delorean/stable
onos_release_uri=https://downloads.onosproject.org/nightly/
onos_release_file=onos-1.6.0-rc2.tar.gz
onos_jdk_uri=https://www.dropbox.com/s/qyujpib8zyhzeev
onos_ovs_uri=https://www.dropbox.com/s/2dyd8zyt2l6p586
onos_ovs_uri=https://www.dropbox.com/s/ojknqcozb2w6z3l
onos_ovs_pkg=package_ovs_rpm3.tar.gz
doctor_driver=https://raw.githubusercontent.com/openstack/congress/master/congress/datasources/doctor_driver.py
dpdk_uri_base=http://artifacts.opnfv.org/ovsnfv
dpdk_rpms=(
'ovs4opnfv-32930523-dpdk-16.04.0-1.el7.centos.x86_64.rpm'
'ovs4opnfv-32930523-dpdk-devel-16.04.0-1.el7.centos.x86_64.rpm'
'ovs4opnfv-32930523-dpdk-examples-16.04.0-1.el7.centos.x86_64.rpm'
'ovs4opnfv-32930523-dpdk-tools-16.04.0-1.el7.centos.x86_64.rpm'
'ovs4opnfv-32930523-openvswitch-2.5.90-0.12032.gitc61e93d6.1.el7.centos.x86_64.rpm'
'ovs4opnfv-55ef39e7-dpdk-16.04.0-1.el7.centos.x86_64.rpm'
'ovs4opnfv-55ef39e7-dpdk-devel-16.04.0-1.el7.centos.x86_64.rpm'
'ovs4opnfv-55ef39e7-dpdk-examples-16.04.0-1.el7.centos.x86_64.rpm'
'ovs4opnfv-55ef39e7-dpdk-tools-16.04.0-1.el7.centos.x86_64.rpm'
'ovs4opnfv-55ef39e7-openvswitch-2.5.90-0.12032.gitc61e93d6.1.el7.centos.x86_64.rpm'
)
ovs_rpm_name=openvswitch-2.5.90-1.el7.centos.x86_64.rpm
......
......@@ -20,3 +20,13 @@
41,Add pcs cleanup exec
50,Fix rabbitmq ipv6 config
52,Add notifier topic to ceilometer
53,Add numa to controller hiera hierarchy
54,fix network mtu
56,fixes tacker config for heat_uri
57,Remove trailing newline from dpdk pci address
58,Enable Ceph on boot
61,Add dpdk bind lock file for vpp deployment
63,Fixes honeycomb on compute
62,Heat Domain
64,Fix missing metadata param
65,Add nic list and ip address to fdio class
\ No newline at end of file
......@@ -120,19 +120,19 @@ if [[ "$MAKE_TARGETS" == "images" ]]; then
MAKE_TARGETS+=" rpms-check"
else
# Spec files are selective
if [[ $commit_file_list == *build/opnfv-apex-undercloud.spec* ]]; then
if [[ $commit_file_list == *build/rpm_specs/opnfv-apex-undercloud.spec* ]]; then
MAKE_TARGETS+=" undercloud-rpm-check"
fi
if [[ $commit_file_list == *build/opnfv-apex-common.spec* ]]; then
if [[ $commit_file_list == *build/rpm_specs/opnfv-apex-common.spec* ]]; then
MAKE_TARGETS+=" common-rpm-check"
fi
if [[ $commit_file_list == *build/opnfv-apex.spec* ]]; then
if [[ $commit_file_list == *build/rpm_specs/opnfv-apex.spec* ]]; then
MAKE_TARGETS+=" opendaylight-rpm-check"
fi
if [[ $commit_file_list == *build/opnfv-apex-onos.spec* ]]; then
if [[ $commit_file_list == *build/rpm_specs/opnfv-apex-onos.spec* ]]; then
MAKE_TARGETS+=" onos-rpm-check"
fi
if [[ $commit_file_list == *build/opnfv-apex-opendaylight-sfc.spec* ]]; then
if [[ $commit_file_list == *build/rpm_specs/opnfv-apex-opendaylight-sfc.spec* ]]; then
MAKE_TARGETS+=" opendaylight-sfc-rpm-check"
fi
fi
......
......@@ -26,6 +26,7 @@ interactive="FALSE"
ping_site="8.8.8.8"
ntp_server="pool.ntp.org"
net_isolation_enabled="TRUE"
net_isolation_arg=""
post_config="TRUE"
debug="FALSE"
......@@ -44,7 +45,7 @@ OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_networ
VM_CPUS=4
VM_RAM=8
VM_COMPUTES=2
VM_COMPUTES=1
# Netmap used to map networks to OVS bridge names
NET_MAP['admin_network']="br-admin"
......@@ -130,6 +131,7 @@ parse_cmdline() {
;;
--flat )
net_isolation_enabled="FALSE"
net_isolation_arg="--flat"
echo "Underlay Network Isolation Disabled: using flat configuration"
shift 1
;;
......@@ -211,16 +213,16 @@ parse_cmdline() {
main() {
parse_cmdline "$@"
if [ -n "$DEPLOY_SETTINGS_FILE" ]; then
echo -e "${blue}INFO: Parsing deploy settings file...${reset}"
parse_deploy_settings
fi
echo -e "${blue}INFO: Parsing network settings file...${reset}"
parse_network_settings
if ! configure_deps; then
echo -e "${red}Dependency Validation Failed, Exiting.${reset}"
exit 1
fi
if [ -n "$DEPLOY_SETTINGS_FILE" ]; then
echo -e "${blue}INFO: Parsing deploy settings file...${reset}"
parse_deploy_settings
fi
setup_undercloud_vm
if [ "$virtual" == "TRUE" ]; then
setup_virtual_baremetal $VM_CPUS $VM_RAM
......
......@@ -5,7 +5,7 @@
CONFIG=${CONFIG:-'/var/opt/opnfv'}
RESOURCES=${RESOURCES:-"$CONFIG/images"}
LIB=${LIB:-"$CONFIG/lib"}
VALID_CMDS="undercloud overcloud opendaylight debug-stack -h --help"
VALID_CMDS="undercloud overcloud opendaylight debug-stack mock-detached -h --help"
source $LIB/utility-functions.sh
......@@ -91,13 +91,28 @@ parse_cmdline() {
;;
mock-detached)
if [ "$2" == "on" ]; then
echo "Blocking output http and https traffic"
echo "Ensuring we can talk to gerrit.opnfv.org"
iptables -A OUTPUT -p tcp -d gerrit.opnfv.org --dport 443 -j ACCEPT
echo "Blocking output http (80) traffic"
iptables -A OUTPUT -p tcp --dport 80 -j REJECT
iptables -A FORWARD -p tcp --dport 80 -j REJECT
echo "Blocking output https (443) traffic"
iptables -A OUTPUT -p tcp --dport 443 -j REJECT
iptables -A FORWARD -p tcp --dport 443 -j REJECT
echo "Blocking output dns (53) traffic"
iptables -A FORWARD -p tcp --dport 53 -j REJECT
elif [ "$2" == "off" ]; then
echo "Allowing output http and https traffic"
echo "Cleaning gerrit.opnfv.org specific rule"
iptables -D OUTPUT -p tcp -d gerrit.opnfv.org --dport 443 -j ACCEPT
echo "Allowing output http (80) traffic"
iptables -D OUTPUT -p tcp --dport 80 -j REJECT
iptables -D FORWARD -p tcp --dport 80 -j REJECT
echo "Allowing output https (443) traffic"
iptables -D OUTPUT -p tcp --dport 443 -j REJECT
iptables -D FORWARD -p tcp --dport 443 -j REJECT
echo "Allowing output dns (53) traffic"
iptables -D OUTPUT -p tcp --dport 53 -j REJECT
iptables -D FORWARD -p tcp --dport 53 -j REJECT
else
display_usage
fi
......
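For reference, a usage sketch for the new mock-detached command above; this assumes the script is reachable through the opnfv-util wrapper used elsewhere in these docs, and that it runs as root so the iptables rules can be applied:

    # Simulate a disconnected deployment environment
    opnfv-util mock-detached on
    # ... run the deployment under test ...
    opnfv-util mock-detached off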
......@@ -20,6 +20,6 @@ deploy_options:
libvirtpin: 1
kernel:
hugepagesz: 2M
hugepages: 4096
hugepages: 2048
intel_iommu: 'on'
iommu: pt
......@@ -19,6 +19,6 @@ deploy_options:
libvirtpin: 1
kernel:
hugepagesz: 2M
hugepages: 1024
hugepages: 2048
intel_iommu: 'on'
iommu: pt
......@@ -19,6 +19,6 @@ deploy_options:
libvirtpin: 1
kernel:
hugepagesz: 2M
hugepages: 1024
hugepages: 2048
intel_iommu: 'on'
iommu: pt
......@@ -21,6 +21,6 @@ deploy_options:
libvirtpin: 1
kernel:
hugepagesz: 2M
hugepages: 4096
hugepages: 2048
intel_iommu: 'on'
iommu: pt
......@@ -23,6 +23,6 @@ deploy_options:
libvirtpin: 1
kernel:
hugepagesz: 2M
hugepages: 4096
hugepages: 2048
intel_iommu: 'on'
iommu: pt
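The five hunks above standardize the fdio scenario files on 2048 hugepages of 2M each (4 GB total). A quick, generic Linux check (not Apex-specific) to confirm the allocation on a deployed node:

    # Verify hugepage count and size after boot
    grep -E '^(HugePages_Total|Hugepagesize)' /proc/meminfo
    # expected: HugePages_Total: 2048 / Hugepagesize: 2048 kB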
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. This work is licensed under a
.. Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
.. (c) OPNFV
......@@ -6,5 +7,5 @@
Apex configuration
==================
.. include:: ../installation-instructions/introduction.rst
.. include:: ../installation-instructions/baremetal.rst
.. include:: ../installationprocedure/introduction.rst
.. include:: ../installationprocedure/baremetal.rst
Triple-O Deployment Architecture
================================
Apex is based on the OpenStack Triple-O project as distributed by
the RDO Project. It is important to understand the basics
of a Triple-O deployment to help make decisions that will assist in
successfully deploying OPNFV.
Triple-O stands for OpenStack On OpenStack. This means that OpenStack
will be used to install OpenStack. The target OPNFV deployment is an
OpenStack cloud with NFV features built-in that will be deployed by a
smaller all-in-one deployment of OpenStack. In this deployment
methodology there are two OpenStack installations. They are referred
to as the undercloud and the overcloud. The undercloud is used to
deploy the overcloud.
The undercloud is the all-in-one installation of OpenStack that includes
baremetal provisioning. RDO Manager's deployment of the undercloud is
call Undercloud. Undercloud will be deployed as a virtual machine on a jumphost.
This VM is pre-built and distributed as part of the Apex RPM.
The overcloud is OPNFV. Configuration will be passed into Undercloud and
Undercloud will use OpenStack's orchestration component call Heat to
execute a deployment will provision the target nodes to become OPNFV.
Triple-O Deployment Architecture
================================
Apex is based on the OpenStack Triple-O project as distributed by
the RDO Project. It is important to understand the basics
of a Triple-O deployment to help make decisions that will assist in
successfully deploying OPNFV.
Triple-O stands for OpenStack On OpenStack. This means that OpenStack
will be used to install OpenStack. The target OPNFV deployment is an
OpenStack cloud with NFV features built-in that will be deployed by a
smaller all-in-one deployment of OpenStack. In this deployment
methodology there are two OpenStack installations. They are referred
to as the undercloud and the overcloud. The undercloud is used to
deploy the overcloud.
The undercloud is the all-in-one installation of OpenStack that includes
baremetal provisioning capability. The undercloud will be deployed as a
virtual machine on a jumphost. This VM is pre-built and distributed as part
of the Apex RPM.
The overcloud is OPNFV. Configuration will be passed into undercloud and
the undercloud will use OpenStack's orchestration component, named Heat, to
execute a deployment that will provision the target OPNFV nodes.
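Since the undercloud is just a libvirt VM on the jumphost, a quick way to see it is a generic libvirt check (the exact VM name may differ):

::

    # List VMs on the jumphost; the undercloud VM should appear
    sudo virsh list --all | grep -i undercloud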
OPNFV Scenario Architecture
===========================
OPNFV groups different SDN controllers, deployment options, and features into
"scenarios". Scenario names are universal across all OPNFV installers,
although not every installer supports every scenario.
The standard naming convention for a scenario is:
<VIM platform>-<SDN type>-<feature>-<ha/noha>
The only supported VIM type is "OS" (OpenStack), while the SDN type can be any
supported SDN controller. "feature" covers options such as ovs_dpdk, sfc, etc.
"ha" or "noha" determines whether the deployment will be highly available. If
"ha" is used, at least 3 control nodes are required.
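As an illustration of the convention only (this helper is hypothetical, not part of Apex), a scenario name can be split into its fields with plain shell:

::

    # Hypothetical: split a scenario name into its four fields
    scenario="os-odl_l2-fdio-noha"
    IFS='-' read -r vim sdn feature mode <<< "$scenario"
    echo "VIM=$vim SDN=$sdn feature=$feature mode=$mode"
    # prints: VIM=os SDN=odl_l2 feature=fdio mode=noha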
OPNFV Scenarios in Apex
=======================
Apex provides pre-built scenario files in /etc/opnfv-apex which a user can
select from to deploy the desired scenario. Simply pass the desired file to
the installer as a (-d) deploy setting; a sample invocation follows the matrix
below. Read further in the Apex documentation to learn more about invoking the
deploy command. Below is a quick reference matrix for OPNFV scenarios
supported in Apex. Please refer to the respective OPNFV Docs documentation for
each scenario for a full description. The following scenarios correspond to a
supported <Scenario>.yaml deploy settings file:
+-------------------------+------------+-----------------+
| **Scenario** | **Owner** | **Known Issues**|
+-------------------------+------------+-----------------+
| os-nosdn-nofeature-ha | Apex | |
+-------------------------+------------+-----------------+
| os-nosdn-nofeature-noha | Apex | |
+-------------------------+------------+-----------------+
| os-nosdn-ovs-noha | OVS for NFV| |
+-------------------------+------------+-----------------+
| os-nosdn-fdio-noha | FDS | |
+-------------------------+------------+-----------------+
| os-odl_l2-nofeature-ha | Apex | |
+-------------------------+------------+-----------------+
| os-odl_l3-nofeature-ha | Apex | APEX-112 |
+-------------------------+------------+-----------------+
| os-odl_l2-sfc-noha | SFC | |
+-------------------------+------------+-----------------+
| os-odl_l2-bgpvpn-noha | SDNVPN | |
+-------------------------+------------+-----------------+
| os-odl_l2-fdio-noha | FDS | |
+-------------------------+------------+-----------------+
| os-onos-nofeature-ha | ONOSFW | |
+-------------------------+------------+-----------------+
| os-onos-sfc-ha | ONOSFW | |
+-------------------------+------------+-----------------+
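As a worked example, deploying the first scenario in the matrix might look like the following; the opnfv-deploy entry point and the -n network settings flag are assumptions here, and the exact flags are covered later in the installation instructions:

| ``opnfv-deploy -d /etc/opnfv-apex/os-nosdn-nofeature-ha.yaml -n /etc/opnfv-apex/network_settings.yaml``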
......@@ -12,7 +12,7 @@ Preface
Apex uses Triple-O from the RDO Project OpenStack distribution as a
provisioning tool. The Triple-O image based life cycle installation
tool provisions an OPNFV Target System (3 controllers, n number of
tool provisions an OPNFV Target System (3 controllers, 2 or more
compute nodes) with OPNFV specific configuration provided by the Apex
deployment tool chain.
......
......@@ -12,12 +12,13 @@ The Jumphost requirements are outlined below:
3. libvirt virtualization support.
4. minimum 2 networks and maximum 6 networks, multiple NIC and/or VLAN combinations are supported.
This is virtualized for a VM deployment.
4. minimum 1 networks and maximum 5 networks, multiple NIC and/or VLAN
combinations are supported. This is virtualized for a VM deployment.
5. The Colorado Apex RPMs and their dependencies.
6. 16 GB of RAM for a bare metal deployment, 64 GB of RAM for a VM deployment.
6. 16 GB of RAM for a bare metal deployment, 64 GB of RAM for a VM
deployment.
Network Requirements
--------------------
......@@ -26,22 +27,33 @@ Network requirements include:
1. No DHCP or TFTP server running on networks used by OPNFV.
2. 2-6 separate networks with connectivity between Jumphost and nodes.
2. 1-5 separate networks with connectivity between Jumphost and nodes.
- Control Plane (Provisioning) / Private (API) Network
- Control Plane (Provisioning)
- Internal (Tenant Networking) Network
- Private Tenant-Networking Network*
- External Network
- Storage Network*
3. Lights out OOB network access from Jumphost with IPMI node enabled (bare metal deployment only).
- Internal API Network* (required for IPv6 \*\*)
4. Admin or public network has Internet access, meaning a gateway and DNS availability.
3. Lights out OOB network access from Jumphost with IPMI node enabled
(bare metal deployment only).
| `*` *These networks can be combined with each other or all combined on the Control Plane network.*
| `*` *Non-External networks will be consolidated to the Control Plane network if not specifically configured.*
4. External network is a routable network from outside the cloud deployment.
The External network is where public internet access would reside if
available.
\* *These networks can be combined with each other or all combined on the
Control Plane network.*
\* *Non-External networks will be consolidated to the Control Plane network
if not specifically configured.*
\*\* *The Internal API network, by default, is collapsed with the provisioning
network in IPv4 deployments; this is not possible in IPv6 given the current
lack of IPv6 PXE boot support, and therefore the API network is required to
be its own network in an IPv6 deployment.*
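When networks are collapsed entirely, this change also adds a matching option
to the deploy script: the new ``--flat`` argument (see the deploy.sh hunk
earlier in this diff) disables underlay network isolation explicitly. A
hypothetical invocation, assuming deploy.sh is installed as opnfv-deploy:

| ``opnfv-deploy --flat -d <deploy_settings>.yaml``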
Bare Metal Node Requirements
----------------------------
......@@ -63,4 +75,5 @@ In order to execute a deployment, one must gather the following information:
2. IPMI login information for the nodes (user/pass).
3. MAC address of Control Plane / Provisioning interfaces of the overcloud nodes.
3. MAC address of Control Plane / Provisioning interfaces of the overcloud
nodes.
Verifying the Setup
-------------------
Once the deployment has finished, the OPNFV deployment can be accessed via the Undercloud node. From
the jump host ssh to the Undercloud host and become the stack user. Alternativly ssh keys have been
setup such that the root user on the jump host can ssh to Undercloud directly as the stack user. For
convenience a utility script has been provided to look up the undercloud's ip address and ssh to the
undercloud all in one command. An optional user name can be passed to indicate whether to connect as
the stack or root user. The stack user is default if a username is not specified.
Once the deployment has finished, the OPNFV deployment can be accessed via the
undercloud node. From the jump host, ssh to the undercloud host and become the
stack user. Alternatively, ssh keys have been set up such that the root user on
the jump host can ssh to the undercloud directly as the stack user. For
convenience a utility script has been provided to look up the undercloud's IP
address and ssh to the undercloud all in one command. An optional user name can
be passed to indicate whether to connect as the stack or root user; the stack
user is the default if a username is not specified.
| ``opnfv-util undercloud root``
| ``su - stack``
Once connected to Undercloud as the stack user look for two keystone files that can be used to
interact with the undercloud and the overcloud. Source the appropriate RC file to interact with
the respective OpenStack deployment.
Once connected to the undercloud as the stack user, look for two keystone RC
files that can be used to interact with the undercloud and the overcloud.
Source the appropriate RC file to interact with the respective OpenStack
deployment.
| ``source stackrc`` (Undercloud)
| ``source overcloudrc`` (Overcloud / OPNFV)
| ``source stackrc`` (undercloud)
| ``source overcloudrc`` (overcloud / OPNFV)
The contents of these files include the credentials for the administrative user for Undercloud and
OPNFV respectivly. At this point both Undercloud and OPNFV can be interacted with just as any
OpenStack installation can be. Start by listing the nodes in the undercloud that were used
to deploy the overcloud.
The contents of these files include the credentials for the administrative
user for the undercloud and OPNFV respectively. At this point both the
undercloud and OPNFV can be interacted with just as any OpenStack installation
can be. Start by listing the nodes in the undercloud that were used to deploy
the overcloud.
| ``source stackrc``
| ``openstack server list``
The control and compute nodes will be listed in the output of this server list command. The IP
addresses that are listed are the control plane addresses that were used to provision the nodes.
Use these IP addresses to connect to these nodes. Initial authentication requires using the
user heat-admin.
The control and compute nodes will be listed in the output of this server list
command. The IP addresses that are listed are the control plane addresses that
were used to provision the nodes. Use these IP addresses to connect to these
nodes. Initial authentication requires using the user heat-admin.
| ``ssh heat-admin@192.0.2.7``
To begin creating users, images, networks, servers, etc in OPNFV source the overcloudrc file or
retrieve the admin user's credentials from the overcloudrc file and connect to the web Dashboard.
To begin creating users, images, networks, servers, etc. in OPNFV, source the
overcloudrc file or retrieve the admin user's credentials from it and connect
to the web Dashboard.