mirror of
https://github.com/sstent/Vagrant_Openstack.git
synced 2025-12-06 06:02:02 +00:00
Setting up a clean setup
@@ -1 +0,0 @@
{"active":{"master":"d823bfc9-ef9e-44fc-a85e-78dfebeb26f3","controller":"66baa24c-0474-42d0-9880-89b4bb9a24cc","compute1":"af5beec5-26a5-43c6-8473-ad6f54b1fc5a"}}
@@ -1 +1 @@
4048e90a-240a-4146-8ac7-729aa0e0b530
25912faa-9672-4d13-a56a-945fefde6a1b
@@ -1 +1 @@
2adeff38-b5e2-468e-a3c7-e74fae8e02ab
ded2633f-4d6b-40d9-8ae4-dc83317562b5
@@ -1,2 +1,7 @@
Vagrant_Openstack
=================

Basic 3-node Puppet cluster:
1 Puppet node
1 controller
1 compute
@@ -1,70 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant::Config.run do |config|
  # All Vagrant configuration is done here. The most common configuration
  # options are documented and commented below. For a complete reference,
  # please see the online documentation at vagrantup.com.

  # Every Vagrant virtual environment requires a box to build off of.
  config.vm.box = "Centos64Puppet"

  config.vm.define :master do |master_config|
    master_config.vm.host_name = "master.vagrant.info"
    master_config.vm.network :hostonly, "192.168.33.10"

    # Enable provisioning with chef solo, specifying a cookbooks path, roles
    # path, and data_bags path (all relative to this Vagrantfile), and adding
    # some recipes and/or roles.
    #
    # config.vm.provision :chef_solo do |chef|
    #   chef.cookbooks_path = "../my-recipes/cookbooks"
    #   chef.roles_path = "../my-recipes/roles"
    #   chef.data_bags_path = "../my-recipes/data_bags"
    #   chef.add_recipe "mysql"
    #   chef.add_role "web"
    #
    #   # You may also specify custom JSON attributes:
    #   chef.json = { :mysql_password => "foo" }
    # end

    # Enable provisioning with chef server, specifying the chef server URL,
    # and the path to the validation key (relative to this Vagrantfile).
    #
    # The Opscode Platform uses HTTPS. Substitute your organization for
    # ORGNAME in the URL and validation key.
    #
    # If you have your own Chef Server, use the appropriate URL, which may be
    # HTTP instead of HTTPS depending on your configuration. Also change the
    # validation key to validation.pem.
    #
    # config.vm.provision :chef_client do |chef|
    #   chef.chef_server_url = "https://api.opscode.com/organizations/ORGNAME"
    #   chef.validation_key_path = "ORGNAME-validator.pem"
    # end
    #
    # If you're using the Opscode platform, your validator client is
    # ORGNAME-validator, replacing ORGNAME with your organization name.
    #
    # If you have your own Chef Server, the default validation client name is
    # chef-validator, unless you changed the configuration.
    #
    #   chef.validation_client_name = "ORGNAME-validator"
  end

  config.vm.define :client do |client_config|
    client_config.vm.host_name = "client.vagrant.info"
    client_config.vm.network :hostonly, "192.168.33.11"
    client_config.vm.forward_port 80, 8080
  end

  config.vm.boot_mode = :headless

  # config.vm.provision :puppet do |puppet|
  #   puppet.manifests_path = "manifests"
  #   puppet.manifest_file = "site.pp"
  #   puppet.options = "--verbose --debug"
  # end
end
@@ -1,503 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A fake (in-memory) hypervisor+api.

Allows nova testing w/o a hypervisor. This module also documents the
semantics of real hypervisor connections.

"""

import os

from oslo.config import cfg

from nova.compute import power_state
from nova.compute import task_states
from nova import db
from nova import exception
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt import images
from nova.virt import virtapi


CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')

LOG = logging.getLogger(__name__)


_FAKE_NODES = None


def set_nodes(nodes):
    """Sets FakeDriver's node list.

    It has effect on the following methods:
        get_available_nodes()
        get_available_resource()
        get_host_stats()

    To restore the change, call restore_nodes().
    """
    global _FAKE_NODES
    _FAKE_NODES = nodes


def restore_nodes():
    """Resets FakeDriver's node list modified by set_nodes().

    Usually called from tearDown().
    """
    global _FAKE_NODES
    _FAKE_NODES = [CONF.host]
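A sketch of the intended pattern (a hypothetical test, assuming nova is importable): set_nodes() in setUp() makes one FakeDriver impersonate several compute nodes, and restore_nodes() in tearDown() resets the module-level list:

    import unittest

    from nova.virt import fake

    class MultiNodeTestCase(unittest.TestCase):
        def setUp(self):
            fake.set_nodes(['node-a', 'node-b'])   # FakeDriver now reports two nodes

        def tearDown(self):
            fake.restore_nodes()                   # back to the default [CONF.host]

        def test_two_nodes(self):
            driver = fake.FakeDriver(virtapi=None)
            self.assertEqual(['node-a', 'node-b'], driver.get_available_nodes())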


class FakeInstance(object):

    def __init__(self, name, state):
        self.name = name
        self.state = state

    def __getitem__(self, key):
        return getattr(self, key)


class FakeDriver(driver.ComputeDriver):
    """Fake hypervisor driver."""

    capabilities = {
        "has_imagecache": True,
        "supports_recreate": True,
        }

    def __init__(self, virtapi, read_only=False):
        super(FakeDriver, self).__init__(virtapi)
        self.instances = {}
        self.host_status_base = {
            'host_name-description': 'Fake Host',
            'host_hostname': CONF.host,
            'host_memory_total': 8000000000,
            'host_memory_overhead': 10000000,
            'host_memory_free': 7900000000,
            'host_memory_free_computed': 7900000000,
            'host_other_config': {},
            'host_ip_address': '192.168.1.109',
            'host_cpu_info': {},
            'disk_available': 500000000000,
            'disk_total': 600000000000,
            'disk_used': 100000000000,
            'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
            'host_name_label': 'fake-host',
            'hypervisor_hostname': CONF.host,
            }
        self._mounts = {}
        self._interfaces = {}
        if not _FAKE_NODES:
            set_nodes([CONF.host])

    def init_host(self, host):
        return

    def list_instances(self):
        return self.instances.keys()

    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        pass

    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        pass

    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        name = instance['name']
        state = power_state.RUNNING
        fake_instance = FakeInstance(name, state)
        self.instances[name] = fake_instance

        # The code below assumes the instance dict carries all of these keys;
        # if any guard is skipped, the names are undefined further down.
        if 'image_ref' in instance:
            image_href = instance['image_ref']
        if 'user_id' in instance:
            user_id = instance['user_id']
        if 'project_id' in instance:
            project_id = instance['project_id']
        size = instance['root_gb']
        # Assumes CONF.instances_path has been registered by another module
        # (e.g. the compute manager) before spawn() runs.
        path = "%s%s%s" % (CONF.instances_path, '/_base/', image_href)
        #LOG.warning("Fake DriverMSG: image_meta type = %s" % type(image_meta))
        #LOG.warning("Fake DriverMSG: image_meta = %s" % image_meta)
        #LOG.warning("Fake DriverMSG: instance type = %s" % type(instance))
        #LOG.warning("Fake DriverMSG: instance = %s" % instance)

        # Pre-create a raw backing file of root_gb gigabytes (the 'G' suffix
        # is assumed here; a bare number would be interpreted as bytes), then
        # pull the image from glance over it.
        cmd = "qemu-img create -f raw %s %sG" % (path, size)
        LOG.warning("Fake Driver: base instances_path = %s" % path)
        os.system(cmd)
        #images.fetch_to_raw(context, image_href, path, user_id, project_id)
        images.fetch(context, image_href, path, user_id, project_id)
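To make the data flow concrete, a hypothetical call continuing the earlier sketch (the dict keys are exactly the ones spawn() reads; a reachable glance and a writable CONF.instances_path are assumed):

    from nova import context

    ctxt = context.get_admin_context()
    instance = {'name': 'inst-1',
                'image_ref': 'some-glance-image-uuid',   # placeholder id
                'user_id': 'fake-user',
                'project_id': 'fake-project',
                'root_gb': 10}
    driver.spawn(ctxt, instance, image_meta=None, injected_files=[],
                 admin_password='s3cret')
    # The driver now tracks 'inst-1' as RUNNING and has written a raw
    # backing file under CONF.instances_path/_base/.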

    def live_snapshot(self, context, instance, name, update_task_state):
        if instance['name'] not in self.instances:
            raise exception.InstanceNotRunning(instance_id=instance['uuid'])
        update_task_state(task_state=task_states.IMAGE_UPLOADING)

    def snapshot(self, context, instance, name, update_task_state):
        if instance['name'] not in self.instances:
            raise exception.InstanceNotRunning(instance_id=instance['uuid'])
        update_task_state(task_state=task_states.IMAGE_UPLOADING)

    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        pass

    @staticmethod
    def get_host_ip_addr():
        return '192.168.0.1'

    def set_admin_password(self, instance, new_pass):
        pass

    def inject_file(self, instance, b64_path, b64_contents):
        pass

    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        pass

    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        pass

    def unrescue(self, instance, network_info):
        pass

    def poll_rebooting_instances(self, timeout, instances):
        pass

    def migrate_disk_and_power_off(self, context, instance, dest,
                                   instance_type, network_info,
                                   block_device_info=None):
        pass

    def finish_revert_migration(self, instance, network_info,
                                block_device_info=None, power_on=True):
        pass

    def post_live_migration_at_destination(self, context, instance,
                                           network_info,
                                           block_migration=False,
                                           block_device_info=None):
        pass

    def power_off(self, instance):
        pass

    def power_on(self, context, instance, network_info, block_device_info):
        pass

    def soft_delete(self, instance):
        pass

    def restore(self, instance):
        pass

    def pause(self, instance):
        pass

    def unpause(self, instance):
        pass

    def suspend(self, instance):
        pass

    def resume(self, instance, network_info, block_device_info=None):
        pass

    def destroy(self, instance, network_info, block_device_info=None,
                destroy_disks=True):
        key = instance['name']
        if key in self.instances:
            del self.instances[key]
        else:
            LOG.warning(_("Key '%(key)s' not in instances '%(inst)s'") %
                        {'key': key,
                         'inst': self.instances}, instance=instance)

    def attach_volume(self, connection_info, instance, mountpoint):
        """Attach the disk to the instance at mountpoint using info."""
        instance_name = instance['name']
        if instance_name not in self._mounts:
            self._mounts[instance_name] = {}
        self._mounts[instance_name][mountpoint] = connection_info
        return True

    def detach_volume(self, connection_info, instance, mountpoint):
        """Detach the disk attached to the instance."""
        try:
            del self._mounts[instance['name']][mountpoint]
        except KeyError:
            pass
        return True
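A hypothetical round trip through the in-memory mount table these two methods maintain (the connection_info shape below is illustrative only):

    conn = {'driver_volume_type': 'iscsi', 'data': {}}
    driver.attach_volume(conn, {'name': 'inst-1'}, '/dev/vdb')
    # _mounts['inst-1']['/dev/vdb'] now holds conn
    driver.detach_volume(conn, {'name': 'inst-1'}, '/dev/vdb')
    # detaching an unknown mountpoint is a silent no-op (KeyError swallowed)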

    def attach_interface(self, instance, image_meta, network_info):
        for (network, mapping) in network_info:
            if mapping['vif_uuid'] in self._interfaces:
                raise exception.InterfaceAttachFailed('duplicate')
            self._interfaces[mapping['vif_uuid']] = mapping

    def detach_interface(self, instance, network_info):
        for (network, mapping) in network_info:
            try:
                del self._interfaces[mapping['vif_uuid']]
            except KeyError:
                raise exception.InterfaceDetachFailed('not attached')

    def get_info(self, instance):
        if instance['name'] not in self.instances:
            raise exception.InstanceNotFound(instance_id=instance['name'])
        i = self.instances[instance['name']]
        return {'state': i.state,
                'max_mem': 0,
                'mem': 0,
                'num_cpu': 2,
                'cpu_time': 0}

    def get_diagnostics(self, instance_name):
        return {'cpu0_time': 17300000000,
                'memory': 524288,
                'vda_errors': -1,
                'vda_read': 262144,
                'vda_read_req': 112,
                'vda_write': 5778432,
                'vda_write_req': 488,
                'vnet1_rx': 2070139,
                'vnet1_rx_drop': 0,
                'vnet1_rx_errors': 0,
                'vnet1_rx_packets': 26701,
                'vnet1_tx': 140208,
                'vnet1_tx_drop': 0,
                'vnet1_tx_errors': 0,
                'vnet1_tx_packets': 662,
                }

    def get_all_bw_counters(self, instances):
        """Return bandwidth usage counters for each interface on each
        running VM.
        """
        bw = []
        return bw

    def get_all_volume_usage(self, context, compute_host_bdms):
        """Return usage info for volumes attached to vms on
        a given host.
        """
        volusage = []
        return volusage

    def block_stats(self, instance_name, disk_id):
        return [0L, 0L, 0L, 0L, None]

    def interface_stats(self, instance_name, iface_id):
        return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]

    def get_console_output(self, instance):
        return 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE'

    def get_vnc_console(self, instance):
        return {'internal_access_path': 'FAKE',
                'host': 'fakevncconsole.com',
                'port': 6969}

    def get_spice_console(self, instance):
        return {'internal_access_path': 'FAKE',
                'host': 'fakespiceconsole.com',
                'port': 6969,
                'tlsPort': 6970}

    def get_console_pool_info(self, console_type):
        return {'address': '127.0.0.1',
                'username': 'fakeuser',
                'password': 'fakepassword'}

    def refresh_security_group_rules(self, security_group_id):
        return True

    def refresh_security_group_members(self, security_group_id):
        return True

    def refresh_instance_security_rules(self, instance):
        return True

    def refresh_provider_fw_rules(self):
        pass

    def get_available_resource(self, nodename):
        """Updates compute manager resource info on ComputeNode table.

        Since we don't have a real hypervisor, pretend we have lots of
        disk and ram.
        """
        if nodename not in _FAKE_NODES:
            return {}

        dic = {'vcpus': 1,
               'memory_mb': 8192,
               'local_gb': 1028,
               'vcpus_used': 0,
               'memory_mb_used': 0,
               'local_gb_used': 0,
               'hypervisor_type': 'fake',
               'hypervisor_version': '1.0',
               'hypervisor_hostname': nodename,
               'cpu_info': '?'}
        return dic

    def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
        """This method is supported only by libvirt."""
        raise NotImplementedError('This method is supported only by libvirt.')

    def get_instance_disk_info(self, instance_name):
        return

    def live_migration(self, context, instance_ref, dest,
                       post_method, recover_method, block_migration=False,
                       migrate_data=None):
        return

    def check_can_live_migrate_destination_cleanup(self, ctxt,
                                                   dest_check_data):
        return

    def check_can_live_migrate_destination(self, ctxt, instance_ref,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        return {}

    def check_can_live_migrate_source(self, ctxt, instance_ref,
                                      dest_check_data):
        return

    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        return

    def confirm_migration(self, migration, instance, network_info):
        return

    def pre_live_migration(self, context, instance_ref, block_device_info,
                           network_info, disk, migrate_data=None):
        return

    def unfilter_instance(self, instance_ref, network_info):
        """This method is supported only by libvirt."""
        raise NotImplementedError('This method is supported only by libvirt.')

    def test_remove_vm(self, instance_name):
        """Removes the named VM, as if it crashed. For testing."""
        self.instances.pop(instance_name)

    def get_host_stats(self, refresh=False):
        """Return fake Host Status of ram, disk, network."""
        stats = []
        for nodename in _FAKE_NODES:
            host_status = self.host_status_base.copy()
            host_status['hypervisor_hostname'] = nodename
            host_status['host_hostname'] = nodename
            host_status['host_name_label'] = nodename
            stats.append(host_status)
        if len(stats) == 0:
            raise exception.NovaException("FakeDriver has no node")
        elif len(stats) == 1:
            return stats[0]
        else:
            return stats

    def host_power_action(self, host, action):
        """Reboots, shuts down or powers up the host."""
        return action

    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation.
        """
        if not mode:
            return 'off_maintenance'
        return 'on_maintenance'

    def set_host_enabled(self, host, enabled):
        """Sets the specified host's ability to accept new instances."""
        if enabled:
            return 'enabled'
        return 'disabled'

    def get_disk_available_least(self):
        pass

    def get_volume_connector(self, instance):
        return {'ip': '127.0.0.1', 'initiator': 'fake', 'host': 'fakehost'}

    def get_available_nodes(self):
        return _FAKE_NODES

    def instance_on_disk(self, instance):
        return False

    def list_instance_uuids(self):
        return []

    def legacy_nwinfo(self):
        return True


class FakeVirtAPI(virtapi.VirtAPI):
    def instance_update(self, context, instance_uuid, updates):
        return db.instance_update_and_get_original(context,
                                                   instance_uuid,
                                                   updates)

    def aggregate_get_by_host(self, context, host, key=None):
        return db.aggregate_get_by_host(context, host, key=key)

    def aggregate_metadata_add(self, context, aggregate, metadata,
                               set_delete=False):
        return db.aggregate_metadata_add(context, aggregate['id'], metadata,
                                         set_delete=set_delete)

    def aggregate_metadata_delete(self, context, aggregate, key):
        return db.aggregate_metadata_delete(context, aggregate['id'], key)

    def security_group_get_by_instance(self, context, instance):
        return db.security_group_get_by_instance(context, instance['uuid'])

    def security_group_rule_get_by_security_group(self, context,
                                                  security_group):
        return db.security_group_rule_get_by_security_group(
            context, security_group['id'])

    def provider_fw_rule_get_all(self, context):
        return db.provider_fw_rule_get_all(context)

    def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
        return db.agent_build_get_by_triple(context,
                                            hypervisor, os, architecture)

    def instance_type_get(self, context, instance_type_id):
        return db.instance_type_get(context, instance_type_id)
@@ -1,7 +1,7 @@
node puppet {

  #ensure git is installed
  package { 'puppetlabs-release-6-7':
  package {
    'puppetlabs-release-6-7':
      provider => 'rpm',
      ensure   => installed,
      source   => "http://yum.puppetlabs.com/el/6/products/i386/puppetlabs-release-6-7.noarch.rpm";
@@ -15,51 +15,6 @@ node puppet {
  }

  vcsrepo {
    '/etc/puppet/modules/openstack':
      require  => Package["puppet-server"],
      ensure   => latest,
      provider => git,
      source   => 'https://sstent:farscape5@github.com/stratustech/puppet-openstack.git',
      notify   => File["/etc/puppet/modules/openstack"];
    '/etc/puppet/modules/stratus':
      require  => Package["puppet-server"],
      ensure   => latest,
      provider => git,
      source   => 'https://sstent:farscape5@github.com/stratustech/POC_ALPHA_stratusmodule.git';
    '/etc/puppet/manifests':
      require  => [Package["puppet-server"], File['/etc/puppet/manifests']],
      ensure   => latest,
      provider => git,
      source   => 'https://sstent:farscape5@github.com/stratustech/POC_ALPHA_puppet_manifests.git';
    '/etc/puppet/modules/rabbitmq':
      require  => Package["puppet-server"],
      ensure   => latest,
      provider => git,
      source   => 'https://github.com/gergnz/puppetlabs-rabbitmq.git';
    '/etc/puppet/modules/horizon':
      require  => Package["puppet-server"],
      ensure   => latest,
      provider => git,
      source   => 'https://sstent:farscape5@github.com/stratustech/puppet-horizon.git';
  }

  file { "/etc/puppet/modules/openstack":
      require => Package["puppet-server"],
      ensure  => "directory",
      owner   => "root",
      group   => "root",
      mode    => 755,
      recurse => true,
      notify  => Exec["sudo rake modules:clone"];
    "/etc/puppet/manifests":
      require => Package["puppet-server"],
      before  => Vcsrepo['/etc/puppet/manifests'],
      force   => true,
      backup  => false,
      ensure  => "absent";
    "/etc/puppet/autosign.conf":
      require => Package["puppet-server"],
      owner   => "root",
@@ -68,17 +23,11 @@ file { "/etc/puppet/modules/openstack":
      content => "*";
  }

  exec { "sudo rake modules:clone":
    require => [Package["rubygem-rake"], Vcsrepo['/etc/puppet/modules/horizon', '/etc/puppet/modules/rabbitmq', '/etc/puppet/manifests', '/etc/puppet/modules/openstack']],
    cwd     => "/etc/puppet/modules/openstack",
    path    => ["/usr/local/bin", "/bin", "/usr/bin", "/usr/local/sbin", "/usr/sbin", "/sbin", "/home/vagrant/bin"];
  }

  service { "iptables":
      ensure => "stopped",
      enable => false;
    "puppetmaster":
      require => [File["/etc/puppet/autosign.conf"], Exec["sudo rake modules:clone"]],
      require => File["/etc/puppet/autosign.conf"],
      ensure  => "running",
      enable  => true;
  }
@@ -98,28 +47,18 @@ host { 'controller.vagrant.info':

node controller {

  #ensure git is installed
  package { 'puppetlabs-release-6-7':
  package {
    'puppetlabs-release-6-7':
      provider => 'rpm',
      ensure   => installed,
      source   => "http://yum.puppetlabs.com/el/6/products/i386/puppetlabs-release-6-7.noarch.rpm";
    'puppet':
      require => Package["puppetlabs-release-6-7"],
      ensure  => 'present';
  }
}

service { "iptables":
    ensure => "stopped",
    enable => false;
  "puppet":
    require => Package["puppet"],
    ensure  => "running",
    enable  => true;
}

host { 'puppet.vagrant.info':
host {
  'puppet.vagrant.info':
    ip           => '192.168.33.10',
    host_aliases => 'puppet';
  'compute1.vagrant.info':
@@ -149,8 +88,6 @@ service { "puppet":
    enable => true;
}

host { 'puppet.vagrant.info':
  ip           => '192.168.33.10',
  host_aliases => 'puppet';