Add CORD-specific Ceilometer changes to the monitoring repository
- Ceilometer custom notification plugins for the ONOS, vSG, vOLT, and OpenStack infrastructure layers
- Ceilometer publish/subscribe module
- Ceilometer dynamic pipeline configuration module
- Ceilometer UDP proxy
- Ceilometer custom image (Ceilometer v2/v3 versions, kafka_installer, startup scripts)

Change-Id: Ie2ab8ce89cdadbd1fb4dc54ee15e46f8cc8c4c18
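
The plugins below are wired into Ceilometer through setuptools entry points: ceilometer_entry_points.txt carries just the CORD notification handlers, while entry_points.txt is apparently the full entry-point set shipped with the custom Ceilometer image (including the ONOS statistics driver). As a rough illustration only (not part of this change), the same registrations could be declared in a plugin package's setup.py; the package name and version here are hypothetical:

    from setuptools import setup, find_packages

    setup(
        name='ceilometer-cord-plugins',   # hypothetical package name
        version='0.1',
        packages=find_packages(),
        entry_points={
            'ceilometer.notification': [
                'vcpe = ceilometer.network.ext_services.vcpe.notifications:VCPENotification',
                'volt.device = ceilometer.network.ext_services.volt.notifications:VOLTDeviceNotification',
                'infra = ceilometer.network.ext_services.openstack_infra.notifications:OPENSTACK_INFRANotification',
            ],
            'network.statistics.drivers': [
                'onos = ceilometer.network.statistics.onos.driver:ONOSDriver',
            ],
        },
    )
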
diff --git a/xos/synchronizer/ceilometer/ceilometer-plugins/README.md b/xos/synchronizer/ceilometer/ceilometer-plugins/README.md
new file mode 100644
index 0000000..710ca94
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer-plugins/README.md
@@ -0,0 +1 @@
+# ceilometer-plugins
\ No newline at end of file
diff --git a/xos/synchronizer/ceilometer/ceilometer-plugins/ceilometer_entry_points.txt b/xos/synchronizer/ceilometer/ceilometer-plugins/ceilometer_entry_points.txt
new file mode 100644
index 0000000..e30c4ee
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer-plugins/ceilometer_entry_points.txt
@@ -0,0 +1,14 @@
+[ceilometer.notification]
+#Existing Notification endpoints
+vcpe = ceilometer.network.ext_services.vcpe.notifications:VCPENotification
+vcpe.compute.stats = ceilometer.network.ext_services.vcpe.notifications:VCPEComputeStatistics
+vcpe.dns.cache.size = ceilometer.network.ext_services.vcpe.notifications:VCPEDNSCacheSize
+vcpe.dns.total_instered_entries = ceilometer.network.ext_services.vcpe.notifications:VCPEDNSTotalInsertedEntries
+vcpe.dns.replaced_unexpired_entries = ceilometer.network.ext_services.vcpe.notifications:VCPEDNSReplacedUnexpiredEntries
+vcpe.dns.queries_answered_locally = ceilometer.network.ext_services.vcpe.notifications:VCPEDNSQueriesAnsweredLocally
+vcpe.dns.queries_forwarded = ceilometer.network.ext_services.vcpe.notifications:VCPEDNSQueriesForwarded
+vcpe.dns.server.queries_sent = ceilometer.network.ext_services.vcpe.notifications:VCPEDNSServerQueriesSent
+vcpe.dns.server.queries_failed = ceilometer.network.ext_services.vcpe.notifications:VCPEDNSServerQueriesFailed
+volt.device = ceilometer.network.ext_services.volt.notifications:VOLTDeviceNotification
+volt.device.subscribers = ceilometer.network.ext_services.volt.notifications:VOLTDeviceSubscriberNotification
+infra = ceilometer.network.ext_services.openstack_infra.notifications:OPENSTACK_INFRANotification
diff --git a/xos/synchronizer/ceilometer/ceilometer-plugins/entry_points.txt b/xos/synchronizer/ceilometer/ceilometer-plugins/entry_points.txt
new file mode 100644
index 0000000..f2b1a5e
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer-plugins/entry_points.txt
@@ -0,0 +1,350 @@
+[ceilometer.alarm.evaluator]
+combination = ceilometer.alarm.evaluator.combination:CombinationEvaluator
+gnocchi_aggregation_by_metrics_threshold = ceilometer.alarm.evaluator.gnocchi:GnocchiThresholdEvaluator
+gnocchi_aggregation_by_resources_threshold = ceilometer.alarm.evaluator.gnocchi:GnocchiThresholdEvaluator
+gnocchi_resources_threshold = ceilometer.alarm.evaluator.gnocchi:GnocchiThresholdEvaluator
+threshold = ceilometer.alarm.evaluator.threshold:ThresholdEvaluator
+
+[ceilometer.alarm.evaluator_service]
+ceilometer.alarm.service.PartitionedAlarmService = ceilometer.alarm.service:PartitionedAlarmService
+ceilometer.alarm.service.SingletonAlarmService = ceilometer.alarm.service:SingletonAlarmService
+default = ceilometer.alarm.service:AlarmEvaluationService
+partitioned = ceilometer.alarm.service:PartitionedAlarmService
+singleton = ceilometer.alarm.service:SingletonAlarmService
+
+[ceilometer.alarm.notifier]
+http = ceilometer.alarm.notifier.rest:RestAlarmNotifier
+https = ceilometer.alarm.notifier.rest:RestAlarmNotifier
+log = ceilometer.alarm.notifier.log:LogAlarmNotifier
+test = ceilometer.alarm.notifier.test:TestAlarmNotifier
+trust+http = ceilometer.alarm.notifier.trust:TrustRestAlarmNotifier
+trust+https = ceilometer.alarm.notifier.trust:TrustRestAlarmNotifier
+
+[ceilometer.alarm.rule]
+combination = ceilometer.api.controllers.v2.alarm_rules.combination:AlarmCombinationRule
+gnocchi_aggregation_by_metrics_threshold = ceilometer.api.controllers.v2.alarm_rules.gnocchi:AggregationMetricsByIdLookupRule
+gnocchi_aggregation_by_resources_threshold = ceilometer.api.controllers.v2.alarm_rules.gnocchi:AggregationMetricByResourcesLookupRule
+gnocchi_resources_threshold = ceilometer.api.controllers.v2.alarm_rules.gnocchi:MetricOfResourceRule
+threshold = ceilometer.api.controllers.v2.alarm_rules.threshold:AlarmThresholdRule
+
+[ceilometer.alarm.storage]
+db2 = ceilometer.alarm.storage.impl_db2:Connection
+hbase = ceilometer.alarm.storage.impl_hbase:Connection
+log = ceilometer.alarm.storage.impl_log:Connection
+mongodb = ceilometer.alarm.storage.impl_mongodb:Connection
+mysql = ceilometer.alarm.storage.impl_sqlalchemy:Connection
+postgresql = ceilometer.alarm.storage.impl_sqlalchemy:Connection
+sqlite = ceilometer.alarm.storage.impl_sqlalchemy:Connection
+
+[ceilometer.compute.virt]
+hyperv = ceilometer.compute.virt.hyperv.inspector:HyperVInspector
+libvirt = ceilometer.compute.virt.libvirt.inspector:LibvirtInspector
+vsphere = ceilometer.compute.virt.vmware.inspector:VsphereInspector
+xenapi = ceilometer.compute.virt.xenapi.inspector:XenapiInspector
+
+[ceilometer.discover]
+endpoint = ceilometer.agent.discovery.endpoint:EndpointDiscovery
+fw_policy = ceilometer.network.services.discovery:FirewallPolicyDiscovery
+fw_services = ceilometer.network.services.discovery:FirewallDiscovery
+ipsec_connections = ceilometer.network.services.discovery:IPSecConnectionsDiscovery
+lb_health_probes = ceilometer.network.services.discovery:LBHealthMonitorsDiscovery
+lb_members = ceilometer.network.services.discovery:LBMembersDiscovery
+lb_pools = ceilometer.network.services.discovery:LBPoolsDiscovery
+lb_vips = ceilometer.network.services.discovery:LBVipsDiscovery
+local_instances = ceilometer.compute.discovery:InstanceDiscovery
+local_node = ceilometer.agent.discovery.localnode:LocalNodeDiscovery
+tenant = ceilometer.agent.discovery.tenant:TenantDiscovery
+tripleo_overcloud_nodes = ceilometer.hardware.discovery:NodesDiscoveryTripleO
+vpn_services = ceilometer.network.services.discovery:VPNServicesDiscovery
+
+[ceilometer.dispatcher]
+database = ceilometer.dispatcher.database:DatabaseDispatcher
+file = ceilometer.dispatcher.file:FileDispatcher
+http = ceilometer.dispatcher.http:HttpDispatcher
+
+[ceilometer.event.publisher]
+direct = ceilometer.publisher.direct:DirectPublisher
+kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher
+notifier = ceilometer.publisher.messaging:EventNotifierPublisher
+test = ceilometer.publisher.test:TestPublisher
+
+[ceilometer.event.storage]
+db2 = ceilometer.event.storage.impl_db2:Connection
+es = ceilometer.event.storage.impl_elasticsearch:Connection
+hbase = ceilometer.event.storage.impl_hbase:Connection
+log = ceilometer.event.storage.impl_log:Connection
+mongodb = ceilometer.event.storage.impl_mongodb:Connection
+mysql = ceilometer.event.storage.impl_sqlalchemy:Connection
+postgresql = ceilometer.event.storage.impl_sqlalchemy:Connection
+sqlite = ceilometer.event.storage.impl_sqlalchemy:Connection
+
+[ceilometer.event.trait_plugin]
+bitfield = ceilometer.event.trait_plugins:BitfieldTraitPlugin
+split = ceilometer.event.trait_plugins:SplitterTraitPlugin
+
+[ceilometer.hardware.inspectors]
+snmp = ceilometer.hardware.inspector.snmp:SNMPInspector
+
+[ceilometer.metering.storage]
+db2 = ceilometer.storage.impl_db2:Connection
+hbase = ceilometer.storage.impl_hbase:Connection
+log = ceilometer.storage.impl_log:Connection
+mongodb = ceilometer.storage.impl_mongodb:Connection
+mysql = ceilometer.storage.impl_sqlalchemy:Connection
+postgresql = ceilometer.storage.impl_sqlalchemy:Connection
+sqlite = ceilometer.storage.impl_sqlalchemy:Connection
+
+[ceilometer.notification]
+authenticate = ceilometer.identity.notifications:Authenticate
+bandwidth = ceilometer.network.notifications:Bandwidth
+cpu_frequency = ceilometer.compute.notifications.cpu:CpuFrequency
+cpu_idle_percent = ceilometer.compute.notifications.cpu:CpuIdlePercent
+cpu_idle_time = ceilometer.compute.notifications.cpu:CpuIdleTime
+cpu_iowait_percent = ceilometer.compute.notifications.cpu:CpuIowaitPercent
+cpu_iowait_time = ceilometer.compute.notifications.cpu:CpuIowaitTime
+cpu_kernel_percent = ceilometer.compute.notifications.cpu:CpuKernelPercent
+cpu_kernel_time = ceilometer.compute.notifications.cpu:CpuKernelTime
+cpu_percent = ceilometer.compute.notifications.cpu:CpuPercent
+cpu_user_percent = ceilometer.compute.notifications.cpu:CpuUserPercent
+cpu_user_time = ceilometer.compute.notifications.cpu:CpuUserTime
+data_processing = ceilometer.data_processing.notifications:DataProcessing
+disk_ephemeral_size = ceilometer.compute.notifications.instance:EphemeralDiskSize
+disk_root_size = ceilometer.compute.notifications.instance:RootDiskSize
+floatingip = ceilometer.network.notifications:FloatingIP
+group = ceilometer.identity.notifications:Group
+hardware.ipmi.current = ceilometer.ipmi.notifications.ironic:CurrentSensorNotification
+hardware.ipmi.fan = ceilometer.ipmi.notifications.ironic:FanSensorNotification
+hardware.ipmi.temperature = ceilometer.ipmi.notifications.ironic:TemperatureSensorNotification
+hardware.ipmi.voltage = ceilometer.ipmi.notifications.ironic:VoltageSensorNotification
+http.request = ceilometer.middleware:HTTPRequest
+http.response = ceilometer.middleware:HTTPResponse
+image = ceilometer.image.notifications:Image
+image_crud = ceilometer.image.notifications:ImageCRUD
+image_download = ceilometer.image.notifications:ImageDownload
+image_serve = ceilometer.image.notifications:ImageServe
+image_size = ceilometer.image.notifications:ImageSize
+instance = ceilometer.compute.notifications.instance:Instance
+instance_delete = ceilometer.compute.notifications.instance:InstanceDelete
+instance_flavor = ceilometer.compute.notifications.instance:InstanceFlavor
+instance_scheduled = ceilometer.compute.notifications.instance:InstanceScheduled
+magnetodb_index_count = ceilometer.key_value_storage.notifications:Index
+magnetodb_table = ceilometer.key_value_storage.notifications:Table
+memory = ceilometer.compute.notifications.instance:Memory
+network = ceilometer.network.notifications:Network
+network.services.firewall = ceilometer.network.notifications:Firewall
+network.services.firewall.policy = ceilometer.network.notifications:FirewallPolicy
+network.services.firewall.rule = ceilometer.network.notifications:FirewallRule
+network.services.lb.health_monitor = ceilometer.network.notifications:HealthMonitor
+network.services.lb.member = ceilometer.network.notifications:Member
+network.services.lb.pool = ceilometer.network.notifications:Pool
+network.services.lb.vip = ceilometer.network.notifications:Vip
+network.services.vpn = ceilometer.network.notifications:VPNService
+network.services.vpn.connections = ceilometer.network.notifications:IPSecSiteConnection
+network.services.vpn.ikepolicy = ceilometer.network.notifications:IKEPolicy
+network.services.vpn.ipsecpolicy = ceilometer.network.notifications:IPSecPolicy
+objectstore.request = ceilometer.objectstore.notifications:SwiftWsgiMiddleware
+objectstore.request.meters = ceilometer.objectstore.notifications:SwiftWsgiMiddlewareMeters
+port = ceilometer.network.notifications:Port
+profiler = ceilometer.profiler.notifications:ProfilerNotifications
+project = ceilometer.identity.notifications:Project
+role = ceilometer.identity.notifications:Role
+role_assignment = ceilometer.identity.notifications:RoleAssignment
+router = ceilometer.network.notifications:Router
+snapshot = ceilometer.volume.notifications:Snapshot
+snapshot_crud = ceilometer.volume.notifications:SnapshotCRUD
+snapshot_size = ceilometer.volume.notifications:SnapshotSize
+stack_crud = ceilometer.orchestration.notifications:StackCRUD
+subnet = ceilometer.network.notifications:Subnet
+trust = ceilometer.identity.notifications:Trust
+user = ceilometer.identity.notifications:User
+vcpus = ceilometer.compute.notifications.instance:VCpus
+volume = ceilometer.volume.notifications:Volume
+volume_crud = ceilometer.volume.notifications:VolumeCRUD
+volume_size = ceilometer.volume.notifications:VolumeSize
+vcpe = ceilometer.network.ext_services.vcpe.notifications:VCPENotification
+vcpe.compute.stats = ceilometer.network.ext_services.vcpe.notifications:VCPEComputeStatistics
+vcpe.dns.cache.size = ceilometer.network.ext_services.vcpe.notifications:VCPEDNSCacheSize
+vcpe.dns.total_instered_entries = ceilometer.network.ext_services.vcpe.notifications:VCPEDNSTotalInsertedEntries
+vcpe.dns.replaced_unexpired_entries = ceilometer.network.ext_services.vcpe.notifications:VCPEDNSReplacedUnexpiredEntries
+vcpe.dns.queries_answered_locally = ceilometer.network.ext_services.vcpe.notifications:VCPEDNSQueriesAnsweredLocally
+vcpe.dns.queries_forwarded = ceilometer.network.ext_services.vcpe.notifications:VCPEDNSQueriesForwarded
+vcpe.dns.server.queries_sent = ceilometer.network.ext_services.vcpe.notifications:VCPEDNSServerQueriesSent
+vcpe.dns.server.queries_failed = ceilometer.network.ext_services.vcpe.notifications:VCPEDNSServerQueriesFailed
+volt.device = ceilometer.network.ext_services.volt.notifications:VOLTDeviceNotification
+volt.device.subscribers = ceilometer.network.ext_services.volt.notifications:VOLTDeviceSubscriberNotification
+
+[ceilometer.poll.central]
+energy = ceilometer.energy.kwapi:EnergyPollster
+hardware.cpu.load.15min = ceilometer.hardware.pollsters.cpu:CPULoad15MinPollster
+hardware.cpu.load.1min = ceilometer.hardware.pollsters.cpu:CPULoad1MinPollster
+hardware.cpu.load.5min = ceilometer.hardware.pollsters.cpu:CPULoad5MinPollster
+hardware.disk.size.total = ceilometer.hardware.pollsters.disk:DiskTotalPollster
+hardware.disk.size.used = ceilometer.hardware.pollsters.disk:DiskUsedPollster
+hardware.memory.swap.avail = ceilometer.hardware.pollsters.memory:MemorySwapAvailPollster
+hardware.memory.swap.total = ceilometer.hardware.pollsters.memory:MemorySwapTotalPollster
+hardware.memory.total = ceilometer.hardware.pollsters.memory:MemoryTotalPollster
+hardware.memory.used = ceilometer.hardware.pollsters.memory:MemoryUsedPollster
+hardware.network.incoming.bytes = ceilometer.hardware.pollsters.net:IncomingBytesPollster
+hardware.network.ip.incoming.datagrams = ceilometer.hardware.pollsters.network_aggregated:NetworkAggregatedIPInReceives
+hardware.network.ip.outgoing.datagrams = ceilometer.hardware.pollsters.network_aggregated:NetworkAggregatedIPOutRequests
+hardware.network.outgoing.bytes = ceilometer.hardware.pollsters.net:OutgoingBytesPollster
+hardware.network.outgoing.errors = ceilometer.hardware.pollsters.net:OutgoingErrorsPollster
+hardware.system_stats.cpu.idle = ceilometer.hardware.pollsters.system:SystemCpuIdlePollster
+hardware.system_stats.io.incoming.blocks = ceilometer.hardware.pollsters.system:SystemIORawReceivedPollster
+hardware.system_stats.io.outgoing.blocks = ceilometer.hardware.pollsters.system:SystemIORawSentPollster
+image = ceilometer.image.glance:ImagePollster
+image.size = ceilometer.image.glance:ImageSizePollster
+ip.floating = ceilometer.network.floatingip:FloatingIPPollster
+network.services.firewall = ceilometer.network.services.fwaas:FirewallPollster
+network.services.firewall.policy = ceilometer.network.services.fwaas:FirewallPolicyPollster
+network.services.lb.active.connections = ceilometer.network.services.lbaas:LBActiveConnectionsPollster
+network.services.lb.health_monitor = ceilometer.network.services.lbaas:LBHealthMonitorPollster
+network.services.lb.incoming.bytes = ceilometer.network.services.lbaas:LBBytesInPollster
+network.services.lb.member = ceilometer.network.services.lbaas:LBMemberPollster
+network.services.lb.outgoing.bytes = ceilometer.network.services.lbaas:LBBytesOutPollster
+network.services.lb.pool = ceilometer.network.services.lbaas:LBPoolPollster
+network.services.lb.total.connections = ceilometer.network.services.lbaas:LBTotalConnectionsPollster
+network.services.lb.vip = ceilometer.network.services.lbaas:LBVipPollster
+network.services.vpn = ceilometer.network.services.vpnaas:VPNServicesPollster
+network.services.vpn.connections = ceilometer.network.services.vpnaas:IPSecConnectionsPollster
+power = ceilometer.energy.kwapi:PowerPollster
+rgw.containers.objects = ceilometer.objectstore.rgw:ContainersObjectsPollster
+rgw.containers.objects.size = ceilometer.objectstore.rgw:ContainersSizePollster
+rgw.objects = ceilometer.objectstore.rgw:ObjectsPollster
+rgw.objects.containers = ceilometer.objectstore.rgw:ObjectsContainersPollster
+rgw.objects.size = ceilometer.objectstore.rgw:ObjectsSizePollster
+rgw.usage = ceilometer.objectstore.rgw:UsagePollster
+storage.containers.objects = ceilometer.objectstore.swift:ContainersObjectsPollster
+storage.containers.objects.size = ceilometer.objectstore.swift:ContainersSizePollster
+storage.objects = ceilometer.objectstore.swift:ObjectsPollster
+storage.objects.containers = ceilometer.objectstore.swift:ObjectsContainersPollster
+storage.objects.size = ceilometer.objectstore.swift:ObjectsSizePollster
+switch = ceilometer.network.statistics.switch:SWPollster
+switch.flow = ceilometer.network.statistics.flow:FlowPollster
+switch.flow.bytes = ceilometer.network.statistics.flow:FlowPollsterBytes
+switch.flow.duration.nanoseconds = ceilometer.network.statistics.flow:FlowPollsterDurationNanoseconds
+switch.flow.duration.seconds = ceilometer.network.statistics.flow:FlowPollsterDurationSeconds
+switch.flow.packets = ceilometer.network.statistics.flow:FlowPollsterPackets
+switch.port = ceilometer.network.statistics.port:PortPollster
+switch.port.collision.count = ceilometer.network.statistics.port:PortPollsterCollisionCount
+switch.port.receive.bytes = ceilometer.network.statistics.port:PortPollsterReceiveBytes
+switch.port.receive.crc_error = ceilometer.network.statistics.port:PortPollsterReceiveCRCErrors
+switch.port.receive.drops = ceilometer.network.statistics.port:PortPollsterReceiveDrops
+switch.port.receive.errors = ceilometer.network.statistics.port:PortPollsterReceiveErrors
+switch.port.receive.frame_error = ceilometer.network.statistics.port:PortPollsterReceiveFrameErrors
+switch.port.receive.overrun_error = ceilometer.network.statistics.port:PortPollsterReceiveOverrunErrors
+switch.port.receive.packets = ceilometer.network.statistics.port:PortPollsterReceivePackets
+switch.port.transmit.bytes = ceilometer.network.statistics.port:PortPollsterTransmitBytes
+switch.port.transmit.drops = ceilometer.network.statistics.port:PortPollsterTransmitDrops
+switch.port.transmit.errors = ceilometer.network.statistics.port:PortPollsterTransmitErrors
+switch.port.transmit.packets = ceilometer.network.statistics.port:PortPollsterTransmitPackets
+switch.table = ceilometer.network.statistics.table:TablePollster
+switch.table.active.entries = ceilometer.network.statistics.table:TablePollsterActiveEntries
+switch.table.lookup.packets = ceilometer.network.statistics.table:TablePollsterLookupPackets
+switch.table.matched.packets = ceilometer.network.statistics.table:TablePollsterMatchedPackets
+
+[ceilometer.poll.compute]
+cpu = ceilometer.compute.pollsters.cpu:CPUPollster
+cpu_util = ceilometer.compute.pollsters.cpu:CPUUtilPollster
+disk.allocation = ceilometer.compute.pollsters.disk:AllocationPollster
+disk.capacity = ceilometer.compute.pollsters.disk:CapacityPollster
+disk.device.allocation = ceilometer.compute.pollsters.disk:PerDeviceAllocationPollster
+disk.device.capacity = ceilometer.compute.pollsters.disk:PerDeviceCapacityPollster
+disk.device.iops = ceilometer.compute.pollsters.disk:PerDeviceDiskIOPSPollster
+disk.device.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskLatencyPollster
+disk.device.read.bytes = ceilometer.compute.pollsters.disk:PerDeviceReadBytesPollster
+disk.device.read.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceReadBytesRatePollster
+disk.device.read.requests = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsPollster
+disk.device.read.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsRatePollster
+disk.device.usage = ceilometer.compute.pollsters.disk:PerDevicePhysicalPollster
+disk.device.write.bytes = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesPollster
+disk.device.write.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesRatePollster
+disk.device.write.requests = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsPollster
+disk.device.write.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsRatePollster
+disk.iops = ceilometer.compute.pollsters.disk:DiskIOPSPollster
+disk.latency = ceilometer.compute.pollsters.disk:DiskLatencyPollster
+disk.read.bytes = ceilometer.compute.pollsters.disk:ReadBytesPollster
+disk.read.bytes.rate = ceilometer.compute.pollsters.disk:ReadBytesRatePollster
+disk.read.requests = ceilometer.compute.pollsters.disk:ReadRequestsPollster
+disk.read.requests.rate = ceilometer.compute.pollsters.disk:ReadRequestsRatePollster
+disk.usage = ceilometer.compute.pollsters.disk:PhysicalPollster
+disk.write.bytes = ceilometer.compute.pollsters.disk:WriteBytesPollster
+disk.write.bytes.rate = ceilometer.compute.pollsters.disk:WriteBytesRatePollster
+disk.write.requests = ceilometer.compute.pollsters.disk:WriteRequestsPollster
+disk.write.requests.rate = ceilometer.compute.pollsters.disk:WriteRequestsRatePollster
+instance = ceilometer.compute.pollsters.instance:InstancePollster
+instance_flavor = ceilometer.compute.pollsters.instance:InstanceFlavorPollster
+memory.resident = ceilometer.compute.pollsters.memory:MemoryResidentPollster
+memory.usage = ceilometer.compute.pollsters.memory:MemoryUsagePollster
+network.incoming.bytes = ceilometer.compute.pollsters.net:IncomingBytesPollster
+network.incoming.bytes.rate = ceilometer.compute.pollsters.net:IncomingBytesRatePollster
+network.incoming.packets = ceilometer.compute.pollsters.net:IncomingPacketsPollster
+network.outgoing.bytes = ceilometer.compute.pollsters.net:OutgoingBytesPollster
+network.outgoing.bytes.rate = ceilometer.compute.pollsters.net:OutgoingBytesRatePollster
+network.outgoing.packets = ceilometer.compute.pollsters.net:OutgoingPacketsPollster
+
+[ceilometer.poll.ipmi]
+hardware.ipmi.current = ceilometer.ipmi.pollsters.sensor:CurrentSensorPollster
+hardware.ipmi.fan = ceilometer.ipmi.pollsters.sensor:FanSensorPollster
+hardware.ipmi.node.airflow = ceilometer.ipmi.pollsters.node:AirflowPollster
+hardware.ipmi.node.cpu_util = ceilometer.ipmi.pollsters.node:CPUUtilPollster
+hardware.ipmi.node.cups = ceilometer.ipmi.pollsters.node:CUPSIndexPollster
+hardware.ipmi.node.io_util = ceilometer.ipmi.pollsters.node:IOUtilPollster
+hardware.ipmi.node.mem_util = ceilometer.ipmi.pollsters.node:MemUtilPollster
+hardware.ipmi.node.outlet_temperature = ceilometer.ipmi.pollsters.node:OutletTemperaturePollster
+hardware.ipmi.node.power = ceilometer.ipmi.pollsters.node:PowerPollster
+hardware.ipmi.node.temperature = ceilometer.ipmi.pollsters.node:InletTemperaturePollster
+hardware.ipmi.temperature = ceilometer.ipmi.pollsters.sensor:TemperatureSensorPollster
+hardware.ipmi.voltage = ceilometer.ipmi.pollsters.sensor:VoltageSensorPollster
+
+[ceilometer.publisher]
+direct = ceilometer.publisher.direct:DirectPublisher
+file = ceilometer.publisher.file:FilePublisher
+kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher
+meter = ceilometer.publisher.messaging:RPCPublisher
+meter_publisher = ceilometer.publisher.messaging:RPCPublisher
+notifier = ceilometer.publisher.messaging:SampleNotifierPublisher
+rpc = ceilometer.publisher.messaging:RPCPublisher
+test = ceilometer.publisher.test:TestPublisher
+udp = ceilometer.publisher.udp:UDPPublisher
+
+[ceilometer.transformer]
+accumulator = ceilometer.transformer.accumulator:TransformerAccumulator
+aggregator = ceilometer.transformer.conversions:AggregatorTransformer
+arithmetic = ceilometer.transformer.arithmetic:ArithmeticTransformer
+rate_of_change = ceilometer.transformer.conversions:RateOfChangeTransformer
+unit_conversion = ceilometer.transformer.conversions:ScalingTransformer
+
+[console_scripts]
+ceilometer-agent-central = ceilometer.cmd.polling:main_central
+ceilometer-agent-compute = ceilometer.cmd.polling:main_compute
+ceilometer-agent-ipmi = ceilometer.cmd.polling:main_ipmi
+ceilometer-agent-notification = ceilometer.cmd.agent_notification:main
+ceilometer-alarm-evaluator = ceilometer.cmd.alarm:evaluator
+ceilometer-alarm-notifier = ceilometer.cmd.alarm:notifier
+ceilometer-api = ceilometer.cmd.api:main
+ceilometer-collector = ceilometer.cmd.collector:main
+ceilometer-dbsync = ceilometer.cmd.storage:dbsync
+ceilometer-expirer = ceilometer.cmd.storage:expirer
+ceilometer-polling = ceilometer.cmd.polling:main
+ceilometer-rootwrap = oslo_rootwrap.cmd:main
+ceilometer-send-sample = ceilometer.cli:send_sample
+
+[network.statistics.drivers]
+opencontrail = ceilometer.network.statistics.opencontrail.driver:OpencontrailDriver
+opendaylight = ceilometer.network.statistics.opendaylight.driver:OpenDayLightDriver
+onos = ceilometer.network.statistics.onos.driver:ONOSDriver
+
+[oslo.config.opts]
+ceilometer = ceilometer.opts:list_opts
+
+[oslo.messaging.notify.drivers]
+ceilometer.openstack.common.notifier.log_notifier = oslo.messaging.notify._impl_log:LogDriver
+ceilometer.openstack.common.notifier.no_op_notifier = oslo.messaging.notify._impl_noop:NoOpDriver
+ceilometer.openstack.common.notifier.rpc_notifier = oslo.messaging.notify._impl_messaging:MessagingDriver
+ceilometer.openstack.common.notifier.rpc_notifier2 = oslo.messaging.notify._impl_messaging:MessagingV2Driver
+ceilometer.openstack.common.notifier.test_notifier = oslo.messaging.notify._impl_test:TestDriver
+
+[paste.filter_factory]
+swift = ceilometer.objectstore.swift_middleware:filter_factory
+
diff --git a/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/__init__.py b/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/__init__.py
diff --git a/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/openstack_infra/__init__.py b/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/openstack_infra/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/openstack_infra/__init__.py
diff --git a/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/openstack_infra/notifications.py b/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/openstack_infra/notifications.py
new file mode 100644
index 0000000..807672d
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/openstack_infra/notifications.py
@@ -0,0 +1,67 @@
+#
+# Copyright 2012 New Dream Network, LLC (DreamHost)
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Handler for producing network counter messages from Neutron notification
+   events.
+
+"""
+
+import oslo_messaging
+from oslo_config import cfg
+
+from ceilometer.agent import plugin_base
+from oslo_log import log
+from ceilometer import sample
+
+OPTS = [
+    cfg.StrOpt('openstack_infra_service_control_exchange',
+               default='openstack_infra',
+               help="Exchange name for INFRA notifications."),
+]
+
+cfg.CONF.register_opts(OPTS)
+
+LOG = log.getLogger(__name__)
+
+
+class OPENSTACK_INFRANotificationBase(plugin_base.NotificationBase):
+
+    resource_name = None
+
+    def get_targets(self, conf):
+        """Return a sequence of oslo.messaging.Target.
+
+        This sequence defines the exchange and topics this plugin listens on.
+        """
+        LOG.info("get_targets for OPENSTACK INFRA Notification Listener")
+        return [oslo_messaging.Target(topic=topic,
+                                      exchange=conf.openstack_infra_service_control_exchange)
+                for topic in self.get_notification_topics(conf)]
+
+class OPENSTACK_INFRANotification(OPENSTACK_INFRANotificationBase):
+
+    resource_name = None
+    event_types = ['infra$']
+
+    def process_notification(self, message):
+        LOG.info('Received OPENSTACK INFRA notification: resource_id=%(resource_id)s' % {'resource_id': message['payload']['resource_id']})
+        yield sample.Sample.from_notification(
+            name=message['payload']['counter_name'],
+            type=message['payload']['counter_type'],
+            unit=message['payload']['counter_unit'],
+            volume=message['payload']['counter_volume'],
+            user_id=message['payload']['user_id'],
+            project_id=message['payload']['project_id'],
+            resource_id=message['payload']['resource_id'],
+            message=message)
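
OPENSTACK_INFRANotification re-emits whatever counter the infrastructure-layer agent publishes, so the payload has to describe the sample completely. A hypothetical 'infra' payload this handler would accept (all values illustrative):

    # Illustrative payload for an 'infra' event; keys match what
    # process_notification() reads above.
    payload = {
        'counter_name': 'cpu_util',
        'counter_type': 'gauge',
        'counter_unit': '%',
        'counter_volume': 12.5,
        'user_id': 'admin',
        'project_id': 'admin_tenant',
        'resource_id': 'compute-node-1',   # hypothetical resource id
    }
    # The handler yields exactly one Sample built via Sample.from_notification().
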
diff --git a/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/vcpe/__init__.py b/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/vcpe/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/vcpe/__init__.py
diff --git a/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/vcpe/notifications.py b/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/vcpe/notifications.py
new file mode 100644
index 0000000..06a2eb7
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/vcpe/notifications.py
@@ -0,0 +1,272 @@
+#
+# Copyright 2012 New Dream Network, LLC (DreamHost)
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Handler for producing network counter messages from Neutron notification
+   events.
+
+"""
+
+import oslo_messaging
+from oslo_config import cfg
+
+from ceilometer.agent import plugin_base
+from oslo_log import log
+from ceilometer import sample
+
+OPTS = [
+    cfg.StrOpt('vsgservice_control_exchange',
+               default='vcpeservice',
+               help="Exchange name for VCPE notifications."),
+]
+
+cfg.CONF.register_opts(OPTS)
+
+LOG = log.getLogger(__name__)
+
+
+class VCPENotificationBase(plugin_base.NotificationBase):
+
+    resource_name = None
+
+    def get_targets(self, conf):
+        """Return a sequence of oslo.messaging.Target.
+
+        This sequence defines the exchange and topics this plugin
+        listens on.
+        """
+        LOG.info("get_targets for VCPE Notification Listener")
+        return [oslo_messaging.Target(topic=topic,
+                                      exchange=conf.vsgservice_control_exchange)
+                for topic in self.get_notification_topics(conf)]
+
+class VCPENotification(VCPENotificationBase):
+
+    resource_name = None
+    event_types = ['vcpe$']
+
+    def process_notification(self, message):
+        LOG.info('Received VCPE notification: vcpe_id=%(vcpe_id)s' % {'vcpe_id': message['payload']['vcpe_id']})
+        yield sample.Sample.from_notification(
+            name='vsg',
+            type=sample.TYPE_GAUGE,
+            unit='vsg',
+            volume=1,
+            user_id=message['payload']['user_id'],
+            project_id=message['payload']['tenant_id'],
+            resource_id=message['payload']['vcpe_id'],
+            message=message)
+
+class VCPEComputeStatistics(VCPENotificationBase):
+
+    resource_name = None
+    event_types = ['vcpe.compute.stats']
+
+    def process_notification(self, message):
+        LOG.info('Received VCPE event vcpe.compute.stats')
+        if message['payload']:
+            if 'cpu_util' in message['payload']:
+                yield sample.Sample.from_notification(
+                    name='cpu_util',
+                    type=sample.TYPE_GAUGE,
+                    unit='%',
+                    volume=float(message['payload']['cpu_util']),
+                    user_id=message['payload']['user_id'],
+                    project_id=message['payload']['tenant_id'],
+                    resource_id=message['payload']['vcpe_id'],
+                    message=message)
+            if 'memory' in message['payload']:
+                yield sample.Sample.from_notification(
+                    name='memory',
+                    type=sample.TYPE_GAUGE,
+                    unit='MB',
+                    volume=float(message['payload']['memory']),
+                    user_id=message['payload']['user_id'],
+                    project_id=message['payload']['tenant_id'],
+                    resource_id=message['payload']['vcpe_id'],
+                    message=message)
+            if 'memory_usage' in message['payload']:
+                yield sample.Sample.from_notification(
+                    name='memory.usage',
+                    type=sample.TYPE_GAUGE,
+                    unit='MB',
+                    volume=float(message['payload']['memory_usage']),
+                    user_id=message['payload']['user_id'],
+                    project_id=message['payload']['tenant_id'],
+                    resource_id=message['payload']['vcpe_id'],
+                    message=message)
+            if 'network_stats' in message['payload']:
+                for intf in message['payload']['network_stats']:
+                    resource_id = message['payload']['vcpe_id'] + '-' + intf['intf']
+                    if 'rx_bytes' in intf:
+                        yield sample.Sample.from_notification(
+                            name='network.incoming.bytes',
+                            type=sample.TYPE_CUMULATIVE,
+                            unit='B',
+                            volume=float(intf['rx_bytes']),
+                            user_id=message['payload']['user_id'],
+                            project_id=message['payload']['tenant_id'],
+                            resource_id=resource_id,
+                            message=message)
+                    if 'tx_bytes' in intf:
+                        yield sample.Sample.from_notification(
+                            name='network.outgoing.bytes',
+                            type=sample.TYPE_CUMULATIVE,
+                            unit='B',
+                            volume=float(intf['tx_bytes']),
+                            user_id=message['payload']['user_id'],
+                            project_id=message['payload']['tenant_id'],
+                            resource_id=resource_id,
+                            message=message)
+                    if 'rx_packets' in intf:
+                        yield sample.Sample.from_notification(
+                            name='network.incoming.packets',
+                            type=sample.TYPE_CUMULATIVE,
+                            unit='packet',
+                            volume=float(intf['rx_packets']),
+                            user_id=message['payload']['user_id'],
+                            project_id=message['payload']['tenant_id'],
+                            resource_id=resource_id,
+                            message=message)
+                    if 'tx_packets' in intf:
+                        yield sample.Sample.from_notification(
+                            name='network.outgoing.packets',
+                            type=sample.TYPE_CUMULATIVE,
+                            unit='packet',
+                            volume=float(intf['tx_packets']),
+                            user_id=message['payload']['user_id'],
+                            project_id=message['payload']['tenant_id'],
+                            resource_id=resource_id,
+                            message=message)
+
+class VCPEDNSCacheSize(VCPENotificationBase):
+
+    resource_name = None
+    event_types = ['vcpe.dns.cache.size']
+
+    def process_notification(self, message):
+        LOG.info('Received VCPE cache.size notification')
+        yield sample.Sample.from_notification(
+            name='vsg.dns.cache.size',
+            type=sample.TYPE_GAUGE,
+            unit='entries',
+            volume=float(message['payload']['cache_size']),
+            user_id=message['payload']['user_id'],
+            project_id=message['payload']['tenant_id'],
+            resource_id=message['payload']['vcpe_id'],
+            message=message)
+
+class VCPEDNSTotalInsertedEntries(VCPENotificationBase):
+
+    resource_name = None
+    event_types = ['vcpe.dns.total_instered_entries']
+
+    def process_notification(self, message):
+        LOG.info('Received VCPE total_instered_entries notification')
+        yield sample.Sample.from_notification(
+            name='vsg.dns.total_instered_entries',
+            type=sample.TYPE_CUMULATIVE,
+            unit='entries',
+            volume=float(message['payload']['total_instered_entries']),
+            user_id=message['payload']['user_id'],
+            project_id=message['payload']['tenant_id'],
+            resource_id=message['payload']['vcpe_id'],
+            message=message)
+
+class VCPEDNSReplacedUnexpiredEntries(VCPENotificationBase):
+
+    resource_name = None
+    event_types = ['vcpe.dns.replaced_unexpired_entries']
+
+    def process_notification(self, message):
+        LOG.info('Received VCPE replaced_unexpired_entries notification')
+        yield sample.Sample.from_notification(
+            name='vsg.dns.replaced_unexpired_entries',
+            type=sample.TYPE_CUMULATIVE,
+            unit='entries',
+            volume=float(message['payload']['replaced_unexpired_entries']),
+            user_id=message['payload']['user_id'],
+            project_id=message['payload']['tenant_id'],
+            resource_id=message['payload']['vcpe_id'],
+            message=message)
+
+class VCPEDNSQueriesForwarded(VCPENotificationBase):
+
+    resource_name = None
+    event_types = ['vcpe.dns.queries_forwarded']
+
+    def process_notification(self, message):
+        LOG.info('Received VCPE queries_forwarded notification')
+        yield sample.Sample.from_notification(
+            name='vsg.dns.queries_forwarded',
+            type=sample.TYPE_CUMULATIVE,
+            unit='queries',
+            volume=float(message['payload']['queries_forwarded']),
+            user_id=message['payload']['user_id'],
+            project_id=message['payload']['tenant_id'],
+            resource_id=message['payload']['vcpe_id'],
+            message=message)
+
+class VCPEDNSQueriesAnsweredLocally(VCPENotificationBase):
+
+    resource_name = None
+    event_types = ['vcpe.dns.queries_answered_locally']
+
+    def process_notification(self, message):
+        LOG.info('Received VCPE queries_answered_locally notification')
+        yield sample.Sample.from_notification(
+            name='vsg.dns.queries_answered_locally',
+            type=sample.TYPE_CUMULATIVE,
+            unit='queries',
+            volume=float(message['payload']['queries_answered_locally']),
+            user_id=message['payload']['user_id'],
+            project_id=message['payload']['tenant_id'],
+            resource_id=message['payload']['vcpe_id'],
+            message=message)
+
+class VCPEDNSServerQueriesSent(VCPENotificationBase):
+
+    resource_name = None
+    event_types = ['vcpe.dns.server.queries_sent']
+
+    def process_notification(self, message):
+        LOG.info('Received VCPE server.queries_sent notification')
+        resource_id = message['payload']['vcpe_id'] + '-' + message['payload']['upstream_server']
+        yield sample.Sample.from_notification(
+            name='vsg.dns.server.queries_sent',
+            type=sample.TYPE_CUMULATIVE,
+            unit='queries',
+            volume=float(message['payload']['queries_sent']),
+            user_id=message['payload']['user_id'],
+            project_id=message['payload']['tenant_id'],
+            resource_id=resource_id,
+            message=message)
+
+class VCPEDNSServerQueriesFailed(VCPENotificationBase):
+
+    resource_name = None
+    event_types = ['vcpe.dns.server.queries_failed']
+
+    def process_notification(self, message):
+        LOG.info('Received VCPE server.queries_failed notification')
+        resource_id = message['payload']['vcpe_id'] + '-' + message['payload']['upstream_server']
+        yield sample.Sample.from_notification(
+            name='vsg.dns.server.queries_failed',
+            type=sample.TYPE_CUMULATIVE,
+            unit='queries',
+            volume=float(message['payload']['queries_failed']),
+            user_id=message['payload']['user_id'],
+            project_id=message['payload']['tenant_id'],
+            resource_id=resource_id,
+            message=message)
+
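
On the emitting side, the vSG agent is expected to publish these events on the 'vcpeservice' exchange via oslo.messaging. Below is a minimal sketch of producing a 'vcpe.compute.stats' event that VCPEComputeStatistics would consume; the transport URL, publisher id, and all payload values are assumptions for illustration:

    from oslo_config import cfg
    import oslo_messaging

    # The handlers above listen on the 'vcpeservice' exchange.
    oslo_messaging.set_transport_defaults(control_exchange='vcpeservice')

    # Assumed AMQP endpoint; use the deployment's transport URL.
    transport = oslo_messaging.get_transport(
        cfg.CONF, url='rabbit://guest:guest@localhost:5672/')
    notifier = oslo_messaging.Notifier(transport, publisher_id='vcpe_agent',
                                       driver='messaging', topic='notifications')

    notifier.info({}, 'vcpe.compute.stats', {
        'vcpe_id': 'vcpe-1',            # hypothetical vSG instance id
        'user_id': 'admin',
        'tenant_id': 'admin_tenant',
        'cpu_util': 12.5,
        'memory': 1024.0,
        'memory_usage': 512.0,
        'network_stats': [{'intf': 'eth0',
                           'rx_bytes': 1000, 'tx_bytes': 2000,
                           'rx_packets': 10, 'tx_packets': 20}],
    })
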
diff --git a/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/volt/__init__.py b/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/volt/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/volt/__init__.py
diff --git a/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/volt/notifications.py b/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/volt/notifications.py
new file mode 100644
index 0000000..a1f3173
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer-plugins/network/ext_services/volt/notifications.py
@@ -0,0 +1,86 @@
+#
+# Copyright 2012 New Dream Network, LLC (DreamHost)
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Handler for producing network counter messages from Neutron notification
+   events.
+
+"""
+
+import oslo_messaging
+from oslo_config import cfg
+
+from ceilometer.agent import plugin_base
+from oslo_log import log
+from ceilometer import sample
+
+OPTS = [
+    cfg.StrOpt('voltservice_control_exchange',
+               default='voltlistener',
+               help="Exchange name for VOLT notifications."),
+]
+
+cfg.CONF.register_opts(OPTS)
+
+LOG = log.getLogger(__name__)
+
+
+class VOLTNotificationBase(plugin_base.NotificationBase):
+
+    resource_name = None
+
+    def get_targets(self, conf):
+        """Return a sequence of oslo.messaging.Target.
+
+        This sequence defines the exchange and topics this plugin
+        listens on.
+        """
+        LOG.info("get_targets for VOLT Notification Listener")
+        return [oslo_messaging.Target(topic=topic,
+                                      exchange=conf.voltservice_control_exchange)
+                for topic in self.get_notification_topics(conf)]
+
+class VOLTDeviceNotification(VOLTNotificationBase):
+    resource_name = 'volt.device'
+    event_types = ['volt.device','volt.device.disconnect']
+
+    def process_notification(self, message):
+        LOG.info('Received VOLT device notification')
+        yield sample.Sample.from_notification(
+            name=message['event_type'],
+            type=sample.TYPE_GAUGE,
+            unit='olt',
+            volume=1,
+            user_id=message['payload']['user_id'],
+            project_id=message['payload']['project_id'],
+            resource_id=message['payload']['id'],
+            message=message)
+
+class VOLTDeviceSubscriberNotification(VOLTNotificationBase):
+    resource_name = 'volt.device.subscriber'
+    event_types = ['volt.device.subscriber','volt.device.subscriber.unregister']
+
+    def process_notification(self, message):
+        LOG.info('Received VOLT device subscriber notification')
+        resource_id = message['payload']['id'] + '-' + message['payload']['subscriber_id']
+        yield sample.Sample.from_notification(
+            name=message['event_type'],
+            type=sample.TYPE_GAUGE,
+            unit='subscriber',
+            volume=1,
+            user_id=message['payload']['user_id'],
+            project_id=message['payload']['project_id'],
+            resource_id=resource_id,
+            message=message)
+
+
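
For the subscriber meter, the sample's resource_id is formed by joining the OLT device id and the subscriber id, so both keys must be present in the payload. A hypothetical 'volt.device.subscriber' payload (values illustrative):

    # Illustrative payload for a 'volt.device.subscriber' event.
    payload = {
        'id': 'of:0000000000000001',   # OLT device id
        'subscriber_id': 'sub-42',     # hypothetical subscriber id
        'user_id': 'admin',
        'project_id': 'admin_tenant',
    }
    # VOLTDeviceSubscriberNotification yields one gauge sample with
    # resource_id 'of:0000000000000001-sub-42' and volume 1.
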
diff --git a/xos/synchronizer/ceilometer/ceilometer-plugins/network/statistics/__init__.py b/xos/synchronizer/ceilometer/ceilometer-plugins/network/statistics/__init__.py
new file mode 100644
index 0000000..9c53c36
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer-plugins/network/statistics/__init__.py
@@ -0,0 +1,101 @@
+#
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+
+from oslo_utils import netutils
+import six
+from six.moves.urllib import parse as urlparse
+from stevedore import driver as _driver
+
+from ceilometer.agent import plugin_base
+from ceilometer import sample
+
+
+@six.add_metaclass(abc.ABCMeta)
+class _Base(plugin_base.PollsterBase):
+
+    NAMESPACE = 'network.statistics.drivers'
+    drivers = {}
+
+    @property
+    def default_discovery(self):
+        # this signifies that the pollster gets its resources from
+        # elsewhere, in this case they're manually listed in the
+        # pipeline configuration
+        return None
+
+    @abc.abstractproperty
+    def meter_name(self):
+        """Return a Meter Name."""
+
+    @abc.abstractproperty
+    def meter_type(self):
+        """Return a Meter Type."""
+
+    @abc.abstractproperty
+    def meter_unit(self):
+        """Return a Meter Unit."""
+
+    @staticmethod
+    def _parse_my_resource(resource):
+
+        parse_url = netutils.urlsplit(resource)
+
+        params = urlparse.parse_qs(parse_url.query)
+        parts = urlparse.ParseResult(parse_url.scheme,
+                                     parse_url.netloc,
+                                     parse_url.path,
+                                     None,
+                                     None,
+                                     None)
+        return parts, params
+
+    @staticmethod
+    def get_driver(scheme):
+        if scheme not in _Base.drivers:
+            _Base.drivers[scheme] = _driver.DriverManager(_Base.NAMESPACE,
+                                                          scheme).driver()
+        return _Base.drivers[scheme]
+
+    def get_samples(self, manager, cache, resources):
+        resources = resources or []
+        for resource in resources:
+            parse_url, params = self._parse_my_resource(resource)
+            ext = self.get_driver(parse_url.scheme)
+            sample_data = ext.get_sample_data(self.meter_name,
+                                              parse_url,
+                                              params,
+                                              cache)
+
+            for data in sample_data or []:
+                if data is None:
+                    continue
+                if not isinstance(data, list):
+                    data = [data]
+                for (volume, resource_id,
+                     resource_metadata, timestamp) in data:
+
+                    yield sample.Sample(
+                        name=self.meter_name,
+                        type=self.meter_type,
+                        unit=self.meter_unit,
+                        volume=volume,
+                        user_id=None,
+                        project_id='default_admin_tenant',
+                        resource_id=resource_id,
+                        timestamp=timestamp,
+                        resource_metadata=resource_metadata
+                    )
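
get_driver() resolves the scheme of each pipeline resource URL through the 'network.statistics.drivers' namespace, so resources with an 'onos' scheme are served by the ONOSDriver registered in entry_points.txt. A minimal sketch of that lookup; the resource URL shown is only an assumption about how an ONOS endpoint might be listed in the pipeline:

    from stevedore import driver as _driver

    # e.g. a pipeline resource such as (assumed form):
    #   onos://onos-cord:8181/onos/v1?auth=basic&user=onos&password=rocks
    # has scheme 'onos', which stevedore maps to the registered driver class.
    onos_driver = _driver.DriverManager('network.statistics.drivers', 'onos').driver()
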
diff --git a/xos/synchronizer/ceilometer/ceilometer-plugins/network/statistics/onos/__init__.py b/xos/synchronizer/ceilometer/ceilometer-plugins/network/statistics/onos/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer-plugins/network/statistics/onos/__init__.py
diff --git a/xos/synchronizer/ceilometer/ceilometer-plugins/network/statistics/onos/client.py b/xos/synchronizer/ceilometer/ceilometer-plugins/network/statistics/onos/client.py
new file mode 100644
index 0000000..46b6285
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer-plugins/network/statistics/onos/client.py
@@ -0,0 +1,250 @@
+#
+# Copyright 2013 NEC Corporation.  All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+
+from oslo_config import cfg
+import requests
+from requests import auth
+import six
+import json
+
+from ceilometer.i18n import _
+from ceilometer.openstack.common import log
+
+
+CONF = cfg.CONF
+CONF.import_opt('http_timeout', 'ceilometer.service')
+
+
+LOG = log.getLogger(__name__)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class _Base(object):
+    """Base class of ONOS REST APIs Clients."""
+
+    @abc.abstractproperty
+    def base_url(self):
+        """Returns base url for each REST API."""
+
+    def __init__(self, client):
+        self.client = client
+
+    def request(self, path, container_name):
+        return self.client.request(self.base_url + path, container_name)
+
+
+class ONOSRESTAPIFailed(Exception):
+    pass
+
+
+class ONOSRESTAPIClient(_Base):
+    """ONOS Statistics REST API Client
+
+    Base URL:
+      {endpoint}/onos/v1
+    """
+
+    base_url = '/onos/v1'
+
+    def get_devices(self, container_name):
+        """Get device informations
+
+        URL:
+            {Base URL}/devices
+        """
+        output = '{ "devices":[ \
+                     { \
+                        "id":"of:0000000000000001", \
+                        "type":"SWITCH", \
+                        "available":true, \
+                        "role":"MASTER", \
+                        "mfr":"Stanford University, Ericsson Research and CPqD Research", \
+                        "hw":"OpenFlow 1.3 Reference Userspace Switch", \
+                        "sw":"Apr  6 2015 16:10:53", \
+                        "serial":"1", \
+                        "chassisId":"1", \
+                        "annotations":{"protocol":"OF_13","channelId":"192.168.10.50:39306"} \
+                     }]}'
+
+        return self.request('/devices', container_name)
+        #LOG.info("SRIKANTH: Returning dummy ONOS devices output")
+        #outputJson = json.loads(output)
+        #return outputJson
+
+    def get_flow_statistics(self, container_name):
+        """Get flow statistics
+
+        URL:
+            {Base URL}/flows
+        """
+        output = '{"flows":[ \
+                       { \
+                          "deviceId":"of:0000000000000001", \
+                          "id":"3377699721451393", \
+                          "tableId":2, \
+                          "appId":12, \
+                          "groupId":0, \
+                          "priority":100, \
+                          "timeout":0, \
+                          "isPermanent":true, \
+                          "state":"PENDING_ADD", \
+                          "life":0, \
+                          "packets":0, \
+                          "bytes":0, \
+                          "lastSeen":1439355470576, \
+                          "treatment":{"instructions":[],"deferred":[]}, \
+                          "selector":{"criteria":[]} \
+                      }]}'
+        return self.request('/flows', container_name)
+        #LOG.info("SRIKANTH: Returning dummy ONOS flow statistics output")
+        #outputJson = json.loads(output)
+        #return outputJson
+
+    def get_port_statistics(self, container_name):
+        """Get port statistics
+
+        URL:
+            {Base URL}/portstats
+        """
+        output = '{ "portstats": [ \
+                      { \
+                          "deviceId":"of:0000000000000001", \
+                          "id":"3", \
+                          "receivePackets": "182", \
+                          "sentPackets": "173", \
+                          "receiveBytes": "12740", \
+                          "sentBytes": "12110", \
+                          "receiveDrops": "740", \
+                          "sentDrops": "110", \
+                          "receiveErrors": "740", \
+                          "sentErrors": "110", \
+                          "receiveFrameError": "740", \
+                          "receiveOverRunError": "740", \
+                          "receiveCrcError": "740", \
+                          "collisionCount": "110" \
+                      }]}'
+        #TODO Add Portstats REST API to ONOS
+        return self.request('/statistics/ports', container_name)
+        #LOG.info("SRIKANTH: Returning dummy ONOS port statistics output")
+        #outputJson = json.loads(output)
+        #return outputJson
+
+    def get_table_statistics(self, container_name):
+        """Get table statistics
+
+        URL:
+            {Base URL}/table
+        """
+        output = '{ \
+                      "tableStatistics": [ \
+                          { \
+                              "deviceId":"of:0000000000000001", \
+                              "id":"4", \
+                              "activeCount": "11", \
+                              "lookupCount": "816", \
+                              "matchedCount": "220", \
+                              "maximumEntries": "1000" \
+                          } \
+                       ] \
+                    }'
+        #TODO Add table statistics REST API to ONOS
+        return self.request('/statistics/flows/tables', container_name)
+        #LOG.info("SRIKANTH: Returning dummy ONOS table statistics output")
+        #outputJson = json.loads(output)
+        #return outputJson
+
+class Client(object):
+
+    def __init__(self, endpoint, params):
+        self.rest_client = ONOSRESTAPIClient(self)
+
+        self._endpoint = endpoint
+
+        self._req_params = self._get_req_params(params)
+
+    @staticmethod
+    def _get_req_params(params):
+        req_params = {
+            'headers': {
+                'Accept': 'application/json'
+            },
+            'timeout': CONF.http_timeout,
+        }
+
+        auth_way = params.get('auth')
+        if auth_way in ['basic', 'digest']:
+            user = params.get('user')
+            password = params.get('password')
+
+            if auth_way == 'basic':
+                auth_class = auth.HTTPBasicAuth
+            else:
+                auth_class = auth.HTTPDigestAuth
+
+            req_params['auth'] = auth_class(user, password)
+        return req_params
+
+    def _log_req(self, url):
+
+        curl_command = ['REQ: curl -i -X GET ', '"%s" ' % (url)]
+
+        if 'auth' in self._req_params:
+            auth_class = self._req_params['auth']
+            if isinstance(auth_class, auth.HTTPBasicAuth):
+                curl_command.append('--basic ')
+            else:
+                curl_command.append('--digest ')
+
+            curl_command.append('--user "%s":"%s" ' % (auth_class.username,
+                                                       auth_class.password))
+
+        for name, value in six.iteritems(self._req_params['headers']):
+            curl_command.append('-H "%s: %s" ' % (name, value))
+
+        LOG.debug(''.join(curl_command))
+
+    @staticmethod
+    def _log_res(resp):
+
+        dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version,
+                                                  resp.status_code,
+                                                  resp.reason)]
+        dump.extend('%s: %s\n' % (k, v)
+                    for k, v in six.iteritems(resp.headers))
+        dump.append('\n')
+        if resp.content:
+            dump.extend([resp.content, '\n'])
+
+        LOG.debug(''.join(dump))
+
+    def _http_request(self, url):
+        if CONF.debug:
+            self._log_req(url)
+        resp = requests.get(url, **self._req_params)
+        if CONF.debug:
+            self._log_res(resp)
+        if resp.status_code / 100 != 2:
+            raise ONOSRESTAPIFailed(
+                _('ONOS API returned %(status)s %(reason)s') %
+                {'status': resp.status_code, 'reason': resp.reason})
+
+        return resp.json()
+
+    def request(self, path, container_name):
+
+        url = self._endpoint + path % {'container_name': container_name}
+        return self._http_request(url)
diff --git a/xos/synchronizer/ceilometer/ceilometer-plugins/network/statistics/onos/driver.py b/xos/synchronizer/ceilometer/ceilometer-plugins/network/statistics/onos/driver.py
new file mode 100644
index 0000000..810275f
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer-plugins/network/statistics/onos/driver.py
@@ -0,0 +1,359 @@
+#
+# Copyright 2013 NEC Corporation.  All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from oslo_utils import timeutils
+import six
+from six import moves
+from six.moves.urllib import parse as urlparse
+
+from ceilometer.i18n import _
+from ceilometer.network.statistics import driver
+from ceilometer.network.statistics.onos import client
+from ceilometer.openstack.common import log
+from ceilometer import utils
+
+
+LOG = log.getLogger(__name__)
+
+
+def _get_properties(properties, prefix='properties'):
+    resource_meta = {}
+    if properties is not None:
+        for k, v in six.iteritems(properties):
+            value = v['value']
+            key = prefix + '_' + k
+            if 'name' in v:
+                key += '_' + v['name']
+            resource_meta[key] = value
+    return resource_meta
+
+
+def _get_int_sample(key, statistic, resource_id, resource_meta):
+    if key not in statistic:
+        return None
+    return int(statistic[key]), resource_id, resource_meta
+
+
+class ONOSDriver(driver.Driver):
+    """Driver of network info collector from ONOS.
+
+    This driver uses resources defined in "pipeline.yaml".
+    A resource must satisfy the conditions below:
+
+    * resource is url
+    * scheme is "onos"
+
+    This driver can be configured via query parameters.
+    Supported parameters:
+
+    * scheme:
+      The scheme of request url to ONOS REST API endpoint.
+      (default http)
+    * auth:
+      HTTP auth strategy.
+      This parameter can be set to basic or digest. (default None)
+    * user:
+      Username used by auth. (default None)
+    * password:
+      Password used by auth. (default None)
+    * container_name:
+      Name of the ONOS container. (default "default")
+      This parameter allows multiple values.
+
+    e.g.::
+
+      onos://127.0.0.1:8181/onos/v1?auth=basic&user=admin&password=admin&scheme=http
+
+    In this case, the driver sends requests to URLs such as:
+
+      http://127.0.0.1:8181/onos/v1/flows
+    """
+    @staticmethod
+    def _prepare_cache(endpoint, params, cache):
+
+        if 'network.statistics.onos' in cache:
+            return cache['network.statistics.onos']
+
+        data = {}
+
+        container_names = params.get('container_name', ['default'])
+
+        onos_params = {}
+        if 'auth' in params:
+            onos_params['auth'] = params['auth'][0]
+        if 'user' in params:
+            onos_params['user'] = params['user'][0]
+        if 'password' in params:
+            onos_params['password'] = params['password'][0]
+        cs = client.Client(endpoint, onos_params)
+
+        for container_name in container_names:
+            try:
+                container_data = {}
+
+                # get flow statistics
+                container_data['flow'] = cs.rest_client.get_flow_statistics(
+                    container_name)
+
+                # get port statistics
+                container_data['port'] = cs.rest_client.get_port_statistics(
+                    container_name)
+
+                # get table statistics
+                container_data['table'] = cs.rest_client.get_table_statistics(
+                    container_name)
+
+                # get topology
+                #container_data['topology'] = cs.topology.get_topology(
+                #    container_name)
+
+                # get switch information
+                container_data['switch'] = cs.rest_client.get_devices(
+                    container_name)
+
+                container_data['timestamp'] = timeutils.isotime()
+
+                data[container_name] = container_data
+            except Exception:
+                LOG.exception(_('Request to the ONOS NorthBound'
+                                ' REST API failed'))
+
+        cache['network.statistics.onos'] = data
+
+        return data
+
+    def get_sample_data(self, meter_name, parse_url, params, cache):
+
+        extractor = self._get_extractor(meter_name)
+        if extractor is None:
+            # This meter is not implemented in this driver, or the
+            # ONOS REST API has no endpoint for it.
+            return None
+
+        iter = self._get_iter(meter_name)
+        if iter is None:
+            # This meter is not implemented in this driver, or the
+            # ONOS REST API has no endpoint for it.
+            return None
+
+        parts = urlparse.ParseResult(params.get('scheme', ['http'])[0],
+                                     parse_url.netloc,
+                                     parse_url.path,
+                                     None,
+                                     None,
+                                     None)
+        endpoint = urlparse.urlunparse(parts)
+
+        data = self._prepare_cache(endpoint, params, cache)
+
+        samples = []
+        for name, value in six.iteritems(data):
+            timestamp = value['timestamp']
+            for sample in iter(extractor, value):
+                if sample is not None:
+                    # set controller name and container name
+                    # to resource_metadata
+                    sample[2]['controller'] = 'ONOS'
+                    sample[2]['container'] = name
+
+                    samples.append(sample + (timestamp, ))
+
+        return samples
+
+    def _get_iter(self, meter_name):
+        if meter_name == 'switch':
+            return self._iter_switch
+        elif meter_name.startswith('switch.flow'):
+            return self._iter_flow
+        elif meter_name.startswith('switch.table'):
+            return self._iter_table
+        elif meter_name.startswith('switch.port'):
+            return self._iter_port
+
+    def _get_extractor(self, meter_name):
+        method_name = '_' + meter_name.replace('.', '_')
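+        # e.g. meter name 'switch.port.receive.packets' resolves to the
+        # extractor method '_switch_port_receive_packets' defined below.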
+        return getattr(self, method_name, None)
+
+    @staticmethod
+    def _iter_switch(extractor, data):
+        for switch in data['switch']['devices']:
+            yield extractor(switch, switch['id'], {})
+
+    @staticmethod
+    def _switch(statistic, resource_id, resource_meta):
+
+        for key in ['mfr','hw','sw','available']:
+            resource_meta[key] = statistic[key]
+        for key in ['protocol','channelId']:
+            resource_meta[key] = statistic['annotations'][key]
+
+        return 1, resource_id, resource_meta
+
+    @staticmethod
+    def _iter_port(extractor, data):
+        for statistic in data['port']['statistics']:
+            for port_statistic in statistic['ports']:
+                resource_meta = {'port': port_statistic['port']}
+                yield extractor(port_statistic, statistic['device'],
+                                resource_meta, data)
+
+    @staticmethod
+    def _switch_port(statistic, resource_id, resource_meta, data):
+        return 1, resource_id, resource_meta
+
+    @staticmethod
+    def _switch_port_receive_packets(statistic, resource_id,
+                                     resource_meta, data):
+        return _get_int_sample('packetsReceived', statistic, resource_id,
+                               resource_meta)
+
+    @staticmethod
+    def _switch_port_transmit_packets(statistic, resource_id,
+                                      resource_meta, data):
+        return _get_int_sample('packetsSent', statistic, resource_id,
+                               resource_meta)
+
+    @staticmethod
+    def _switch_port_receive_bytes(statistic, resource_id,
+                                   resource_meta, data):
+        return _get_int_sample('bytesReceived', statistic, resource_id,
+                               resource_meta)
+
+    @staticmethod
+    def _switch_port_transmit_bytes(statistic, resource_id,
+                                    resource_meta, data):
+        return _get_int_sample('bytesSent', statistic, resource_id,
+                               resource_meta)
+
+    @staticmethod
+    def _switch_port_receive_drops(statistic, resource_id,
+                                   resource_meta, data):
+        return _get_int_sample('packetsRxDropped', statistic, resource_id,
+                               resource_meta)
+
+    @staticmethod
+    def _switch_port_transmit_drops(statistic, resource_id,
+                                    resource_meta, data):
+        return _get_int_sample('packetsTxDropped', statistic, resource_id,
+                               resource_meta)
+
+    @staticmethod
+    def _switch_port_receive_errors(statistic, resource_id,
+                                    resource_meta, data):
+        return _get_int_sample('packetsRxErrors', statistic, resource_id,
+                               resource_meta)
+
+    @staticmethod
+    def _switch_port_transmit_errors(statistic, resource_id,
+                                     resource_meta, data):
+        return _get_int_sample('packetsTxErrors', statistic, resource_id,
+                               resource_meta)
+
+    @staticmethod
+    def _switch_port_receive_frame_error(statistic, resource_id,
+                                         resource_meta, data):
+        #return _get_int_sample('receiveFrameError', statistic, resource_id,
+        #                       resource_meta)
+        return 0, resource_id, resource_meta
+
+    @staticmethod
+    def _switch_port_receive_overrun_error(statistic, resource_id,
+                                           resource_meta, data):
+        #return _get_int_sample('receiveOverRunError', statistic, resource_id,
+        #                       resource_meta)
+        return 0, resource_id, resource_meta
+
+    @staticmethod
+    def _switch_port_receive_crc_error(statistic, resource_id,
+                                       resource_meta, data):
+        #return _get_int_sample('receiveCrcError', statistic, resource_id,
+        #                       resource_meta)
+        return 0, resource_id, resource_meta
+
+    @staticmethod
+    def _switch_port_collision_count(statistic, resource_id,
+                                     resource_meta, data):
+        #return _get_int_sample('collisionCount', statistic, resource_id,
+        #                       resource_meta)
+        return 0, resource_id, resource_meta
+
+    @staticmethod
+    def _iter_table(extractor, data):
+        for statistic in data['table']['statistics']:
+            for table_statistic in statistic['table']:
+                resource_meta = {'table_id': table_statistic['tableId']}
+                yield extractor(table_statistic,
+                                statistic['device'],
+                                resource_meta)
+
+    @staticmethod
+    def _switch_table(statistic, resource_id, resource_meta):
+        return 1, resource_id, resource_meta
+
+    @staticmethod
+    def _switch_table_active_entries(statistic, resource_id,
+                                     resource_meta):
+        return _get_int_sample('activeEntries', statistic, resource_id,
+                               resource_meta)
+
+    @staticmethod
+    def _switch_table_lookup_packets(statistic, resource_id,
+                                     resource_meta):
+        return _get_int_sample('packetsLookedUp', statistic, resource_id,
+                               resource_meta)
+
+    @staticmethod
+    def _switch_table_matched_packets(statistic, resource_id,
+                                      resource_meta):
+        return _get_int_sample('packetsMathced', statistic, resource_id,
+                               resource_meta)
+
+    @staticmethod
+    def _iter_flow(extractor, data):
+        for flow_statistic in data['flow']['flows']:
+            resource_meta = {'flow_id': flow_statistic['id'],
+                             'table_id': flow_statistic['tableId'],
+                             'priority': flow_statistic['priority'],
+                             'state': flow_statistic['state']}
+            yield extractor(flow_statistic,
+                            flow_statistic['deviceId'],
+                            resource_meta)
+
+    @staticmethod
+    def _switch_flow(statistic, resource_id, resource_meta):
+        return 1, resource_id, resource_meta
+
+    @staticmethod
+    def _switch_flow_duration_seconds(statistic, resource_id,
+                                      resource_meta):
+        if 'life' not in statistic:
+            return None
+        return int(statistic['life']/1000), resource_id, resource_meta
+
+    @staticmethod
+    def _switch_flow_duration_nanoseconds(statistic, resource_id,
+                                          resource_meta):
+        if 'life' not in statistic:
+            return None
+        return int(statistic['life']*1000), resource_id, resource_meta
+
+    @staticmethod
+    def _switch_flow_packets(statistic, resource_id, resource_meta):
+        return _get_int_sample('packets', statistic, resource_id,
+                               resource_meta)
+
+    @staticmethod
+    def _switch_flow_bytes(statistic, resource_id, resource_meta):
+        return _get_int_sample('bytes', statistic, resource_id,
+                               resource_meta)
diff --git a/xos/synchronizer/ceilometer/ceilometer-plugins/pipeline_sample.yaml b/xos/synchronizer/ceilometer/ceilometer-plugins/pipeline_sample.yaml
new file mode 100644
index 0000000..cb6c213
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer-plugins/pipeline_sample.yaml
@@ -0,0 +1,120 @@
+---
+sources:
+    - name: all_meters
+      interval: 600
+      meters:
+          - "*"
+          - "!switch"
+          - "!switch.*"
+          - "!vcpe"
+          - "!vcpe.*"
+          - "!cpu"
+          - "!cpu_util"
+          - "!instance"
+          - "!network.incoming.bytes"
+          - "!network.incoming.packets"
+          - "!network.outgoing.bytes"
+          - "!network.outgoing.packets"
+      sinks:
+          - all_meters_sink
+    - name: sdn_source1
+      interval: 600
+      meters:
+          - "switch"
+          - "switch.*"
+      resources:
+          - onos://10.11.10.60:8181?auth=basic&user=onos&password=rocks&scheme=http
+          - onos://10.11.10.61:8181?auth=basic&user=onos&password=rocks&scheme=http
+      sinks:
+          - sdn_sink
+    - name: vcpe_source
+      interval: 600
+      meters:
+          - "vcpe"
+          - "vcpe.*"
+      sinks:
+          - vcpe_sink
+    - name: memory_source
+      interval: 600
+      meters:
+          - "memory"
+      sinks:
+          - memory_sink
+    - name: cpu_source
+      interval: 600
+      meters:
+          - "cpu"
+      sinks:
+          - cpu_sink
+    - name: cpu_util_source
+      interval: 600
+      meters:
+          - "cpu_util"
+      sinks:
+          - cpu_util_sink
+    - name: compute_instance_meters
+      interval: 600
+      meters:
+            - "instance"
+      sinks:
+          - compute_sink
+    - name: network_source
+      interval: 600
+      meters:
+          - "network.incoming.bytes"
+          - "network.incoming.packets"
+          - "network.outgoing.bytes"
+          - "network.outgoing.packets"
+      sinks:
+          - network_sink
+sinks:
+    - name: all_meters_sink
+      transformers:
+      publishers:
+          - notifier://
+    - name: sdn_sink
+      transformers:
+      publishers:
+          - notifier://
+    - name: vcpe_sink
+      transformers:
+      publishers:
+          - notifier://
+    - name: memory_sink
+      transformers:
+      publishers:
+          - notifier://
+    - name: cpu_sink
+      transformers:
+          - name: "rate_of_change"
+            parameters:
+                target:
+                    name: "cpu_util"
+                    unit: "%"
+                    type: "gauge"
+                    scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
+      publishers:
+          - notifier://
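+    # The cpu_sink transformer above derives cpu_util (in %) from the
+    # cumulative "cpu" meter (CPU time in nanoseconds): the rate of change is
+    # scaled by 100 / (10^9 * number of vCPUs), so one fully busy vCPU on a
+    # two-vCPU instance reports roughly 50.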
+    - name: cpu_util_sink
+      transformers:
+      publishers:
+          - notifier://
+    - name: compute_sink
+      publishers:
+          - notifier://
+
+    - name: network_sink
+      transformers:
+          - name: "rate_of_change"
+            parameters:
+                source:
+                   map_from:
+                       name: "network\\.(incoming|outgoing)\\.(bytes|packets)"
+                       unit: "(B|packet)"
+                target:
+                    map_to:
+                        name: "network.\\1.\\2.rate"
+                        unit: "\\1/s"
+                    type: "gauge"
+      publishers:
+          - notifier://
diff --git a/xos/synchronizer/ceilometer/ceilometer-plugins/sample_event_publisher.py b/xos/synchronizer/ceilometer/ceilometer-plugins/sample_event_publisher.py
new file mode 100644
index 0000000..eb84a2f
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer-plugins/sample_event_publisher.py
@@ -0,0 +1,58 @@
+from kombu.connection import BrokerConnection
+from kombu.messaging import Exchange, Queue, Consumer, Producer
+import six
+import uuid
+import datetime
+
+keystone_tenant_id='3a397e70f64e4e40b69b6266c634d9d0'
+keystone_user_id='1e3ce043029547f1a61c1996d1a531a2'
+rabbit_user='openstack'
+rabbit_password='password'
+rabbit_host='localhost'
+vcpeservice_rabbit_exchange='vcpeservice'
+cpe_publisher_id='vcpe_publisher'
+
+producer = None
+
+def setup_rabbit_mq_channel():
+     global producer
+     global rabbit_user, rabbit_password, rabbit_host, vcpeservice_rabbit_exchange,cpe_publisher_id
+     vcpeservice_exchange = Exchange(vcpeservice_rabbit_exchange, "topic", durable=False)
+     # connections/channels
+     connection = BrokerConnection(rabbit_host, rabbit_user, rabbit_password)
+     print 'Connection to RabbitMQ server successful'
+     channel = connection.channel()
+     # produce
+     producer = Producer(channel, exchange=vcpeservice_exchange, routing_key='notifications.info')
+
+def publish_cpe_stats():
+     global producer
+     global keystone_tenant_id, keystone_user_id, cpe_publisher_id
+
+     msg = {'event_type': 'vcpe',
+            'message_id':six.text_type(uuid.uuid4()),
+            'publisher_id': cpe_publisher_id,
+            'timestamp':datetime.datetime.now().isoformat(),
+            'priority':'INFO',
+            'payload': {'vcpe_id':'vcpe-222-432',
+                        'user_id': keystone_user_id,
+                        'tenant_id': keystone_tenant_id
+                       }
+           }
+     producer.publish(msg)
+     msg = {'event_type': 'vcpe.dns.cache.size',
+            'message_id':six.text_type(uuid.uuid4()),
+            'publisher_id': cpe_publisher_id,
+            'timestamp':datetime.datetime.now().isoformat(),
+            'priority':'INFO',
+            'payload': {'vcpe_id':'vcpe-222-432',
+                        'user_id': keystone_user_id,
+                        'tenant_id': keystone_tenant_id,
+                        'cache_size':150
+                       }
+            }           
+     producer.publish(msg)
+
+if __name__ == "__main__":
+   setup_rabbit_mq_channel()
+   publish_cpe_stats()
diff --git a/xos/synchronizer/ceilometer/ceilometer_pub_sub/README b/xos/synchronizer/ceilometer/ceilometer_pub_sub/README
new file mode 100644
index 0000000..dbd6a5f
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_pub_sub/README
@@ -0,0 +1,70 @@
+
+Subscribe-Publish Framework:
+1.Command to install the Flask web server framework:
+  sudo pip install Flask
+
+  Along with Flask we need the following Python modules:
+   msgpack
+   fnmatch
+   operator
+   logging
+   oslo_utils
+   ConfigParser
+ 
+2.Files: i.sub_main.py
+         ii.pubrecords.py
+         iii.pub_sub.conf
+
+3.Command to start the server:
+    #python sub_main.py
+4.Command for subscription:
+      i.app_id: Application ID, should be unique.
+      ii.target:
+           Presently udp and kafka targets are supported.
+           a.udp:<ip:portno>
+           b.kafka:<kafkaip:kafkaport>
+      iii.sub_info: Subscription meters, e.g. cpu_util, cpu_*
+           It can be given as a single value or as a list.
+      iv.query:
+         The following information needs to be provided as part of the query:
+         a.field: fields such as user_id, project_id etc.
+         b.op: "eq", "gt", "lt" etc.
+         c.value: value of the field.
+     Examples (a sketch of a matching UDP listener is given at the end of this README):
+  		 curl -i -H "Content-Type: application/json" -X POST -d '{"app_id":"10","target":"udp://10.11.10.1:5006","sub_info":"cpu_util","query":[{"field":"user_id","op":"eq","value":"e1271a86bd4e413c87248baf2e5f01e0"},{"field":"project_id","op":"eq","value":"b1a3bf16d2014b47be9aefea88087318"},{"field":"resource_id","op":"eq","value":"658cd03f-d0f0-4f55-9f48-39e7222a8646"}]}' -L http://10.11.10.1:4455/subscribe
+           curl -i -H "Content-Type: application/json" -X POST -d '{"app_id":"10","target":"udp://10.11.10.1:5006", "sub_info":["cpu_util", "memory"],"query":[{"field":"user_id","op":"eq","value":"e1271a86bd4e413c87248baf2e5f01e0"},{"field":"project_id","op":"eq","value":"b1a3bf16d2014b47be9aefea88087318"},{"field":"resource_id","op":"eq","value":"658cd03f-d0f0-4f55-9f48-39e7222a8646"}]}' -L http://10.11.10.1:4455/subscribe
+
+5.Command for unsubscription:
+    For unsubscription only the app_id is needed.
+    curl -i -H "Content-Type: application/json" -X POST -d '{"app_id":"10"}' http://10.11.10.1:4455/unsubscribe
+
+6.Running Kafka on the server where the pub-sub module is running:
+  i.Download the kafka from:
+     #https://www.apache.org/dyn/closer.cgi?path=/kafka/0.9.0.0/kafka_2.11-0.9.0.0.tgz
+     http://apache.arvixe.com/kafka/0.9.0.0/kafka_2.11-0.9.0.0.tgz
+  ii.install java
+     sudo apt-get update
+     sudo apt-get install default-jre
+  iii. install kafka package
+     sudo easy_install pip
+     sudo pip install kafka-python
+  iv.tar -xzf kafka_2.11-0.9.0.0.tgz
+  v. Start the zookeeper server:
+      bin/zookeeper-server-start.sh config/zookeeper.properties
+  vi.Start Kafka Server :
+      bin/kafka-server-start.sh config/server.properties
+  vii.To read messages from kafka on a topic test :
+     bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning
+  viii.Before configuring kafka:// publisher in ceilometer:
+     install kafka on both controller node and compute nodes
+     Restart the ceilometer-agent-notification, ceilometer-agent-compute, ceilometer-agent-central daemons
+
+7.[Optional]Install Kafka-web-console (GUI)
+  i.wget https://github.com/adamfokken/kafka-web-console/archive/topic-add-remove.zip
+  ii.unzip it
+  iii.wget http://downloads.typesafe.com/typesafe-activator/1.3.2/typesafe-activator-1.3.2-minimal.zip
+  iv.unzip it and add it to the system path so you can execute the activator command that it provides.
+  v.Install javac if required: sudo apt-get install openjdk-7-jdk
+  vi.cd kafka-web-console-topic-add-remove
+  vii.activator start -DapplyEvolutions.default=true
+  viii.Point your browser to the kafka web-console port (9000) and register the zookeeper
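+
+8.[Optional]Minimal UDP listener for a subscribed application:
+  This is a sketch only. After a udp://<ip>:<port> subscription (step 4),
+  sub_main.py forwards the raw msgpack-encoded ceilometer sample to that
+  target, so the subscriber just needs a UDP socket and msgpack. The IP/port
+  below (10.11.10.1:5006) are the example values from step 4; replace them
+  with your own target.
+
+    #!/usr/bin/python
+    import socket
+    import msgpack
+
+    UDP_IP = "10.11.10.1"    # must match the target given in the subscribe request
+    UDP_PORT = 5006
+
+    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+    sock.bind((UDP_IP, UDP_PORT))
+    while True:
+        data, source = sock.recvfrom(64 * 1024)
+        sample = msgpack.loads(data, encoding='utf-8')
+        print sample['counter_name'], sample['counter_volume']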
diff --git a/xos/synchronizer/ceilometer/ceilometer_pub_sub/kafka_broker.py b/xos/synchronizer/ceilometer/ceilometer_pub_sub/kafka_broker.py
new file mode 100644
index 0000000..778486b
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_pub_sub/kafka_broker.py
@@ -0,0 +1,167 @@
+#
+# Copyright 2015 Cisco Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+import kafka
+from kafka import TopicPartition
+from oslo_config import cfg
+from oslo_utils import netutils
+from six.moves.urllib import parse as urlparse
+import logging as LOG
+
+from ceilometer.publisher import utils
+
+
+class KafkaBrokerPublisher():
+    def __init__(self, parsed_url):
+        self.kafka_client = None
+        self.kafka_server = None
+
+        self.host, self.port = netutils.parse_host_port(
+            parsed_url.netloc, default_port=9092)
+
+        self.local_queue = []
+
+        params = urlparse.parse_qs(parsed_url.query)
+        self.topic = params.get('topic', ['ceilometer'])[-1]
+        self.policy = params.get('policy', ['default'])[-1]
+        self.max_queue_length = int(params.get(
+            'max_queue_length', [1024])[-1])
+        self.max_retry = int(params.get('max_retry', [100])[-1])
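+        # The publisher URL is expected to look roughly like
+        #   kafka://localhost:9092?topic=ceilometer&policy=queue&max_queue_length=2048&max_retry=100
+        # (host:port come from the netloc; all query parameters are optional
+        # and fall back to the defaults above).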
+
+        if self.policy in ['default', 'drop', 'queue']:
+            LOG.info(('Publishing policy set to %s') % self.policy)
+        else:
+            LOG.warn(('Publishing policy is unknown (%s), forcing to default')
+                     % self.policy)
+            self.policy = 'default'
+
+        try:
+            self._get_client()
+            self._get_server()
+        except Exception as e:
+            LOG.exception("Failed to connect to Kafka service: %s", e)
+
+    def publish_samples(self, context, samples):
+        """Send a metering message for kafka broker.
+
+        :param context: Execution context from the service or RPC call
+        :param samples: Samples from pipeline after transformation
+        """
+        samples_list = [
+            utils.meter_message_from_counter(
+                sample, cfg.CONF.publisher.telemetry_secret)
+            for sample in samples
+        ]
+
+        self.local_queue.append(samples_list)
+
+        try:
+            self._check_kafka_connection()
+        except Exception as e:
+            raise e
+
+        self.flush()
+
+    def flush(self):
+        queue = self.local_queue
+        self.local_queue = self._process_queue(queue)
+        if self.policy == 'queue':
+            self._check_queue_length()
+
+    def publish_events(self, context, events):
+        """Send an event message for kafka broker.
+
+        :param context: Execution context from the service or RPC call
+        :param events: events from pipeline after transformation
+        """
+        events_list = [utils.message_from_event(
+            event, cfg.CONF.publisher.telemetry_secret) for event in events]
+
+        self.local_queue.append(events_list)
+
+        try:
+            self._check_kafka_connection()
+        except Exception as e:
+            raise e
+
+        self.flush()
+
+    def _process_queue(self, queue):
+        current_retry = 0
+        while queue:
+            data = queue[0]
+            try:
+                self._send(data)
+            except Exception:
+                LOG.warn(("Failed to publish %d datum"),
+                         sum([len(d) for d in queue]))
+                if self.policy == 'queue':
+                    return queue
+                elif self.policy == 'drop':
+                    return []
+                current_retry += 1
+                if current_retry >= self.max_retry:
+                    self.local_queue = []
+                    LOG.exception(("Failed to retry to send sample data "
+                                      "with max_retry times"))
+                    raise
+            else:
+                queue.pop(0)
+        return []
+
+    def _check_queue_length(self):
+        queue_length = len(self.local_queue)
+        if queue_length > self.max_queue_length > 0:
+            diff = queue_length - self.max_queue_length
+            self.local_queue = self.local_queue[diff:]
+            LOG.warn(("Kafka Publisher max local queue length is exceeded, "
+                     "dropping %d oldest data") % diff)
+
+    def _check_kafka_connection(self):
+        try:
+            self._get_client()
+        except Exception as e:
+            LOG.exception(_LE("Failed to connect to Kafka service: %s"), e)
+
+            if self.policy == 'queue':
+                self._check_queue_length()
+            else:
+                self.local_queue = []
+            raise Exception('Kafka Client is not available, '
+                            'please restart Kafka client')
+
+    def _get_client(self):
+        if not self.kafka_client:
+            self.kafka_client = kafka.KafkaClient(
+                "%s:%s" % (self.host, self.port))
+            self.kafka_producer = kafka.SimpleProducer(self.kafka_client)
+    
+    def _get_server(self):
+        if not self.kafka_server:
+            self.kafka_server = kafka.KafkaClient(
+                "%s:%s" % (self.host, self.port))
+            #self.kafka_consumer = kafka.KafkaConsumer(self.topic,bootstrap_servers = ["%s:%s" % (self.host, self.port)])
+            self.kafka_consumer = kafka.KafkaConsumer(
+                bootstrap_servers=["%s:%s" % (self.host, self.port)])
+            self.kafka_consumer.assign([TopicPartition(self.topic, 0)])
+            self.kafka_consumer.seek_to_end()
+
+    def _send(self, data):
+        try:
+            self.kafka_producer.send_messages(
+                self.topic, json.dumps(data))
+        except Exception as e:
+            LOG.exception(("Failed to send sample data: %s"), e)
+            raise
diff --git a/xos/synchronizer/ceilometer/ceilometer_pub_sub/pub_sub.conf b/xos/synchronizer/ceilometer/ceilometer_pub_sub/pub_sub.conf
new file mode 100644
index 0000000..6998903
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_pub_sub/pub_sub.conf
@@ -0,0 +1,44 @@
+#[LOGGING]
+#level = INFO
+#filename = pub_sub.log
+#maxbytes = 1000000
+#backupcount = 5
+
+[WEB_SERVER]
+webserver_host = localhost
+webserver_port = 4455 
+
+[CLIENT]
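+# target is the endpoint this pub/sub module reads ceilometer output from
+# (a kafka:// or udp:// URL); when UpdateConfMgmt is enabled it is also sent
+# to the configuration-management side as the publish target.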
+target = kafka://localhost:9092?topic=ceilometer
+#target = udp://10.11.10.1:5004/
+
+[RABBITMQ]
+#UpdateConfMgmt = True
+UpdateConfMgmt = False
+Rabbitmq_username = openstack
+Rabbitmq_passwd = password 
+Rabbitmq_host = localhost
+Rabbitmq_port = 5672
+
+[loggers]
+keys=root
+
+[handlers]
+keys=logfile
+
+[formatters]
+keys=logfileformatter
+
+[logger_root]
+level=INFO
+handlers=logfile
+
+[formatter_logfileformatter]
+format='%(asctime)s %(filename)s %(levelname)s %(message)s'
+
+[handler_logfile]
+class=handlers.RotatingFileHandler
+level=NOTSET
+args=('pub_sub.log','a',1000000,5)
+formatter=logfileformatter
+
diff --git a/xos/synchronizer/ceilometer/ceilometer_pub_sub/pubrecords.py b/xos/synchronizer/ceilometer/ceilometer_pub_sub/pubrecords.py
new file mode 100644
index 0000000..bca62b8
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_pub_sub/pubrecords.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+import socket
+from oslo_utils import units
+from oslo_utils import netutils
+import kafka
+import kafka_broker
+import fnmatch
+import logging
+import copy
+
+sub_info=[] 
+class subinfo:
+    def __init__(self,scheme,app_id,app_ip,app_port,subscription_info,sub_info_filter,target):
+        logging.debug("* Updating subscription_info ") 
+        self.scheme = scheme
+        self.app_id = app_id
+        self.ipaddress = app_ip 
+        self.portno = app_port 
+        self.subscription_info = subscription_info
+        self.sub_info_filter = sub_info_filter
+        self.target = target
+        
+        if scheme == "kafka":
+            ''' Creating kafka publisher to send message over kafka '''
+            parse_target = netutils.urlsplit(target)
+            self.kafka_publisher = kafka_broker.KafkaBrokerPublisher(parse_target)
+        elif scheme == "udp":
+            ''' Creating UDP socket to send message over UDP '''
+            self.udp = socket.socket(socket.AF_INET, # Internet
+                                     socket.SOCK_DGRAM) # UDP
+            self.udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)   
+
+    def update_subinfo(self):
+        logging.info("* inside %s",self.update_subinfo.__name__)
+        if not sub_info:
+            logging.debug("* -----------List is EMpty -------------") 
+            sub_info.append(self)
+            logging.debug("* Subscription is sucessful") 
+            return "Subscription is sucessful \n" 
+        for obj in sub_info:
+            if obj.app_id == self.app_id :
+               # obj.subscription_info=self.subscription_info
+                sub_info.remove(obj)
+                sub_info.append(self)
+                logging.warning("* entry already exists so overwriting this subscription \n")
+                return "entry already exists so overwriting this subscription \n" 
+        sub_info.append(self)
+        return "Subscription is sucessful \n"
+ 
+    @staticmethod
+    def delete_subinfo(app_id):
+        logging.info("* inside %s",subinfo.delete_subinfo.__name__)
+        Flag = False 
+        for obj in sub_info:
+            if obj.app_id == app_id : 
+                    sub_info.remove(obj)
+                    Flag = True
+                    logging.debug("* Un-Subscription is sucessful") 
+                    return "Un-Subscription is sucessful \n"
+        if not Flag :
+           err_str = "No subscription exists with app id: " + app_id + "\n"
+           logging.error("* No subscription exists with app id:%s ",app_id)
+           raise Exception (err_str)
+       
+    @staticmethod
+    def print_subinfo():
+        logging.info("* inside %s",subinfo.print_subinfo.__name__)
+        for obj in sub_info:
+            logging.debug("* ------------------------------------------------") 
+            logging.debug("* scheme:%s",obj.scheme)  
+            logging.debug("* app_id:%s",obj.app_id)
+            logging.debug("* portno:%s",obj.portno ) 
+            logging.debug("* ipaddress:%s",obj.ipaddress)  
+            logging.debug("* subscription_info:%s",obj.subscription_info)
+            logging.debug("* sub_info_filter:%s",obj.sub_info_filter)
+            logging.debug("* target:%s",obj.target)
+            logging.debug("* ------------------------------------------------")
+    @staticmethod
+    def get_subinfo(app_id):
+        logging.info("* inside %s",subinfo.get_subinfo.__name__)
+        Flag = False
+        for obj in sub_info:
+            if obj.app_id == app_id :
+                    return obj.subscription_info,obj.target
+        return (None,None)
+       
+ 
+    @staticmethod
+    def get_sub_list(notif_subscription_info):
+        logging.info("* inside %s",subinfo.get_sublist.__name__)
+        sub_list=[]  
+        for obj in sub_info:
+            if obj.subscription_info == notif_subscription_info:
+                sub_list.append(obj)
+        return sub_list
diff --git a/xos/synchronizer/ceilometer/ceilometer_pub_sub/sub_main.py b/xos/synchronizer/ceilometer/ceilometer_pub_sub/sub_main.py
new file mode 100644
index 0000000..e7ebcf4
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_pub_sub/sub_main.py
@@ -0,0 +1,559 @@
+#!/usr/bin/python
+import socket,thread
+import sys
+import msgpack
+import fnmatch
+import operator
+import logging
+import logging.handlers
+import logging.config
+import ConfigParser
+import json
+from oslo_utils import units
+from oslo_utils import netutils
+from pubrecords import *
+import kafka
+import kafka_broker
+
+from flask import request, Request, jsonify
+from flask import Flask
+from flask import make_response
+app = Flask(__name__)
+
+COMPARATORS = {
+    'gt': operator.gt,
+    'lt': operator.lt,
+    'ge': operator.ge,
+    'le': operator.le,
+    'eq': operator.eq,
+    'ne': operator.ne,
+}
+
+LEVELS = {'DEBUG': logging.DEBUG,
+          'INFO': logging.INFO,
+          'WARNING': logging.WARNING,
+          'ERROR': logging.ERROR,
+          'CRITICAL': logging.CRITICAL}
+
+_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+''' Stores all the subscribed meter's list '''
+meter_list = []
+''' Stores meter to app-id mapping '''
+meter_dict = {}
+
+@app.route('/subscribe',methods=['POST','SUB'])
+def subscribe():
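+    # Expected JSON body (see README): "app_id", "target" (udp:// or kafka://
+    # URL that matching samples will be forwarded to), "sub_info" (a meter
+    # name or list of meter names) and an optional "query" list of
+    # {"field", "op", "value"} filters.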
+    try :
+        app_id = request.json['app_id']
+        target = request.json['target']
+        sub_info = request.json['sub_info']
+
+        try :
+            validate_sub_info(sub_info)
+        except Exception as e:
+            logging.error("* %s",e.__str__())
+            return e.__str__()
+
+        ''' Flag to update pipeline cfg file '''
+        config = ConfigParser.ConfigParser()
+        config.read('pub_sub.conf')
+        if config.get('RABBITMQ','UpdateConfMgmt') == "True" : 
+            update_pipeline_conf(sub_info,target,app_id,"ADD")
+        else:
+            logging.warning("Update Conf Mgmt flag is disabled,enable the flag to  update Conf Mgmt")
+
+        if not 'query' in request.json.keys():
+            logging.info("query request is not provided by user")
+            query = None 
+        else:
+             query = request.json['query']
+             for i in range(len(query)):
+                 if not 'field' in query[i].keys():
+                     err_str = "Query field"
+                     raise Exception (err_str)
+                 if not 'op' in query[i].keys():
+                     err_str = "Query op"
+                     raise Exception (err_str)
+                 if not 'value' in query[i].keys():
+                     err_str = "Query value" 
+                     raise Exception (err_str)
+    except Exception as e:
+        err_str = "KeyError: Parsing subscription request " + e.__str__() + "\n"
+        logging.error("* KeyError: Parsing subscription request :%s",e.__str__())  
+        return err_str 
+
+    parse_target=netutils.urlsplit(target)
+    if not parse_target.netloc:
+        err_str = "Error:Invalid target format"
+        logging.error("* Invalid target format")
+        return err_str 
+
+    status = "" 
+    if parse_target.scheme == "udp" or  parse_target.scheme == "kafka":
+         host,port=netutils.parse_host_port(parse_target.netloc)
+         scheme = parse_target.scheme
+         app_ip = host 
+         app_port = port
+ 
+         if host == None or port == None :
+             err_str = "* Error: Invalid IP Address format"
+             logging.error("* Invalid IP Address format")
+             return err_str
+  
+         subscription_info = sub_info
+         sub_info_filter = query 
+         logging.info("Creating subscription for app:%s for meters:%s with filters:%s and target:%s",app_id, subscription_info, sub_info_filter, target)
+         subscrip_obj=subinfo(scheme,app_id,app_ip,app_port,subscription_info,sub_info_filter,target)
+         status = subscrip_obj.update_subinfo()
+         subinfo.print_subinfo()
+
+    if parse_target.scheme == "file" :
+         pass
+    return status 
+
+@app.route('/unsubscribe',methods=['POST','UNSUB'])
+def unsubscribe():
+    try :  
+        app_id = request.json['app_id']
+        sub_info,target = subinfo.get_subinfo(app_id)
+        if sub_info is None or target is None:
+            err_str = "No subscription exists with app id: " + app_id + "\n"
+            logging.error("* No subscription exists with app id:%s ",app_id)
+            return err_str 
+        else:
+            ''' Flag to update pipeline cfg file '''
+            config = ConfigParser.ConfigParser()
+            config.read('pub_sub.conf')
+            if config.get('RABBITMQ','UpdateConfMgmt') == "True" :
+                update_pipeline_conf(sub_info,target,app_id,"DEL")
+            else:
+                logging.warning("Update Conf Mgmt flag is disabled,enable the flag to  update Conf Mgmt")
+            #update_pipeline_conf(sub_info,target,"DEL")
+            subinfo.delete_subinfo(app_id)
+    except Exception as e:
+         logging.error("* %s",e.__str__())
+         return e.__str__()
+    return "UnSubscrition is sucessful! \n"
+
+@app.errorhandler(404)
+def not_found(error):
+    return make_response(jsonify({'error': 'Not found'}), 404)
+
+def print_subscribed_meter_list():
+    logging.debug("-------------------------------------------------")
+    #print (meter_list)
+    logging.debug("meter_list:%s",meter_list)
+    logging.debug("meter_dict:%s",meter_dict)
+    #print (meter_dict)
+    logging.debug("-------------------------------------------------")
+
+def validate_sub_info(sub_info):
+    if type(sub_info) is not list:
+        sub_info = [sub_info]
+    for meter in sub_info:
+        if meter.startswith("*") or meter.startswith("!"):
+            err_str = "Given meter is not supported:" + meter + "\n"
+            logging.error("* Given meter is not supported:%s",meter)
+            raise Exception (err_str)
+
+def update_meter_dict(meterinfo,app_id):
+    try :
+         if type(meterinfo) == list:
+             for meter in meterinfo:  
+                 if meter_dict.get(meter) is None:
+                     meter_dict[meter] = [app_id]
+                 elif app_id not in meter_dict.get(meter):
+                     meter_dict.get(meter).append(app_id)
+         else:
+             if meter_dict.get(meterinfo) is None:
+                 meter_dict[meterinfo] = [app_id]
+             elif app_id not in meter_dict.get(meterinfo):
+                 meter_dict.get(meterinfo).append(app_id)
+    except Exception as e:
+         logging.error("* %s",e.__str__())
+
+def check_send_msg_confmgmt_del(sub_info,app_id):
+    temp_sub_info=[]
+    temm_meter_info = None
+    if len(meter_list) == 0:
+        #print("No subscription exists")
+        logging.info("No subscription exists")
+        return False,None
+    if type(sub_info) == list:
+       for meterinfo in sub_info:
+           if meter_dict.get(meterinfo) is None:
+              logging.warning("%s meter doesn't exist in the meter dict",meterinfo)
+              continue 
+           if app_id in meter_dict.get(meterinfo):
+               if len(meter_dict.get(meterinfo)) == 1:
+                   #print "Only single app is subscribing this meter"
+                   logging.info("Only single app is subscribing this meter")
+                   del meter_dict[meterinfo]
+                   temp_sub_info.append(meterinfo)
+                   if meterinfo in meter_list:
+                       meter_list.remove(meterinfo)
+               else:
+                   meter_dict.get(meterinfo).remove(app_id)  
+       return True,temp_sub_info 
+    else :
+         if meter_dict.get(sub_info) is None:
+              logging.warning("%s meter doesn't exist in the meter dict",sub_info)
+              return False,None 
+         if app_id in meter_dict.get(sub_info):
+             if len(meter_dict.get(sub_info)) == 1:
+                  #print "Only single app is subscribing this meter"
+                  logging.info("Only single app is subscribing this meter")
+                  del meter_dict[sub_info]
+                  if sub_info in meter_list:
+                     meter_list.remove(sub_info)
+                  return True,sub_info   
+             else:
+                 meter_dict.get(sub_info).remove(app_id)
+    return False,None 
+     
+def check_send_msg_confmgmt_add(sub_info,app_id):
+    temp_sub_info=[]
+    update_meter_dict(sub_info,app_id)
+    #import pdb;pdb.set_trace()
+    if len(meter_list) == 0:
+        logging.info("No subinfo exits")
+        if type(sub_info) == list:
+            for j in sub_info:
+                meter_list.append(j)
+            return True,sub_info
+        else :
+            meter_list.append(sub_info)
+            return True,sub_info
+    if type(sub_info) == list:
+        for j in sub_info:
+            if j in meter_list:
+                #print ("meter already exists",j)
+                logging.info("meter already exist:%s",j)
+                continue
+            else :
+                 temp_sub_info.append(j)  
+                 meter_list.append(j)
+        if temp_sub_info is not None:
+            return True,temp_sub_info
+        else :
+            return False,None
+    else :
+         if sub_info not in meter_list:         
+             meter_list.append(sub_info)
+             #print ("subscription for  meter doesn't exist",sub_info)
+             logging.warning("subscription for  meter doesn't exist:%s",sub_info)
+             return True,sub_info
+         else :  
+             #print ("subscription already exist for ",sub_info)
+             logging.info("subscription already exist for:%s ",sub_info)
+             return False,sub_info         
+
+def update_pipeline_conf(sub_info,target,app_id,flag):
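+    # Publishes {"sub_info": <meters>, "target": <CLIENT target from
+    # pub_sub.conf>, "action": "ADD"/"DEL"} as JSON on the RabbitMQ fanout
+    # exchange 'pubsub', so the configuration-management side can update the
+    # ceilometer pipeline accordingly.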
+    import pika
+
+    logging.debug("* sub_info:%s",sub_info)
+    logging.debug("* target:%s",target)
+  
+    #msg={"sub_info":sub_info,"target":target,"action":flag}
+    
+    #json_msg=json.dumps(msg)
+    #msg="image"
+    meter_sub_info = None
+    if flag == "ADD":
+       status,meter_sub_info=check_send_msg_confmgmt_add(sub_info,app_id)
+       if status == False or meter_sub_info == None or meter_sub_info == []:
+           logging.warning("%s is already subscribed with the conf mgmt")
+           return 
+    elif flag == "DEL": 
+       status,meter_sub_info=check_send_msg_confmgmt_del(sub_info,app_id)
+       if status == False or meter_sub_info == None or meter_sub_info == []:
+           logging.warning("%s is already unsubscribed with the conf mgmt")
+           return 
+    try :
+        config = ConfigParser.ConfigParser()
+        config.read('pub_sub.conf')
+        rabbitmq_username = config.get('RABBITMQ','Rabbitmq_username')
+        rabbitmq_passwd = config.get('RABBITMQ','Rabbitmq_passwd')
+        rabbitmq_host = config.get('RABBITMQ','Rabbitmq_host')
+        rabbitmq_port = int ( config.get('RABBITMQ','Rabbitmq_port') )
+
+        ceilometer_client_info = config.get('CLIENT','target')
+        #msg={"sub_info":sub_info,"target":ceilometer_client_info,"action":flag}
+        msg={"sub_info":meter_sub_info,"target":ceilometer_client_info,"action":flag}
+        #print msg
+        json_msg=json.dumps(msg)
+
+        credentials = pika.PlainCredentials(rabbitmq_username,rabbitmq_passwd)
+        parameters = pika.ConnectionParameters(rabbitmq_host,
+                                               rabbitmq_port,
+                                               '/',
+                                               credentials)
+        connection = pika.BlockingConnection(parameters)
+        properties = pika.BasicProperties(content_type = "application/json")
+        channel = connection.channel()
+        channel.exchange_declare(exchange='pubsub',
+                         type='fanout')
+ 
+        channel.basic_publish(exchange='pubsub',
+                              routing_key='',
+                              properties = properties, 
+                              body=json_msg)
+        logging.debug(" [x] %s Sent",msg)
+        logging.info(" [x] %s Sent",msg)
+        connection.close() 
+    except Exception as e:
+           logging.error("Error:%s",e.__str__())
+  
+def read_notification_from_ceilometer(host,port):
+     UDP_IP = host 
+     UDP_PORT = port
+ 
+     logging.debug("* Sarting UDP Client on ip:%s , port:%d",UDP_IP,UDP_PORT) 
+     udp = socket.socket(socket.AF_INET, # Internet
+                          socket.SOCK_DGRAM) # UDP
+     udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+
+     udp.bind((UDP_IP, UDP_PORT))
+     
+     while True:
+            #print thread.get_ident() 
+            #logging.debug("thread.get_ident():%s", thread.get_ident()) 
+            data, source = udp.recvfrom(64 * units.Ki)
+            sample = msgpack.loads(data, encoding='utf-8')
+            #logging.debug("* -------------------------------------------------------")
+            logging.debug("%s",sample)
+            #print(sample)
+            for obj in sub_info:
+                msg_list = []
+                if obj.scheme == "udp" :
+                    if type(obj.subscription_info) is list:
+                        for info in obj.subscription_info:
+                            msg_list.append(fnmatch.fnmatch(sample['counter_name'],info))
+                    else :
+                        msg_list.append(fnmatch.fnmatch(sample['counter_name'],obj.subscription_info)) 
+                    try:  
+                        if reduce(operator.or_, msg_list): 
+                            host = obj.ipaddress
+                            port = int(obj.portno)
+                            l=[]
+                            #logging.debug("* -------------------------------------------------------")
+                            if obj.sub_info_filter is None:
+                                try:  
+                                    logging.debug("* Sending data without query over UDP for host:%s and port:%s",host,port) 
+                                    udp.sendto(data,(host,port))
+                                except Exception as e:
+                                    logging.error ("Unable to send sample over UDP for %s and %s,%s",host,port,e.__str__())
+                                    ret_str = ("Unable to send sample over UDP for %s and %s,%s")%(host,port,e.__str__())
+                                continue 
+                            for i in range(len(obj.sub_info_filter)):
+                                if obj.sub_info_filter[i]['op'] in COMPARATORS:
+                                    op = COMPARATORS[obj.sub_info_filter[i]['op']]
+                                    logging.debug("* obj.sub_info_filter[i]['value']:%s",obj.sub_info_filter[i]['value'])
+                                    logging.debug("* obj.sub_info_filter[i]['field']:%s",obj.sub_info_filter[i]['field'])
+                                    l.append(op(obj.sub_info_filter[i]['value'],sample[obj.sub_info_filter[i]['field']]))
+                                    logging.info("* Logical and of Query %s",l)    
+                                else:
+                                    logging.debug("* Not a valid operator ignoring app_id:%s",obj.app_id)
+                                    l.append(False)
+                                    logging.info("* Logical and of Query %s",l)    
+                            if reduce(operator.and_, l):
+                                try:  
+                                    logging.debug("* Sending data over UDP for host:%s and port:%s",host,port) 
+                                    udp.sendto(data,(host,port))
+                                except Exception:
+                                    logging.error ("Unable to send sample over UDP for %s and %s ",host,port)
+                                    ret_str = ("Unable to send sample over UDP for %s and %s ")%(host,port)
+                            else :
+                                 logging.warning("* No Notification found with the given subscription")
+                        else :
+                            logging.warning("* No valid subscrition found for %s",obj.app_id)
+                    except Exception as e:
+                       logging.error("Key_Error:%s ",e.__str__())
+                       ret_str = ("Key_Error:%s \n")% e.__str__()
+
+def read_notification_from_ceilometer_over_udp(host,port):
+    UDP_IP = host
+    UDP_PORT = port
+
+    logging.debug("* Sarting UDP Client on ip:%s , port:%d",UDP_IP,UDP_PORT)
+    udp = socket.socket(socket.AF_INET, # Internet
+                          socket.SOCK_DGRAM) # UDP
+    udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+
+    udp.bind((UDP_IP, UDP_PORT))
+
+    while True:
+        #print thread.get_ident()
+        #logging.debug("thread.get_ident():%s", thread.get_ident())
+        data, source = udp.recvfrom(64 * units.Ki)
+        sample = msgpack.loads(data, encoding='utf-8')
+        status = process_ceilometer_message(sample,data)
+  
+def read_notification_from_ceilometer_over_kafka(parse_target):
+    logging.info("Kafka target:%s",parse_target)
+    try :
+        kafka_publisher=kafka_broker.KafkaBrokerPublisher(parse_target)
+        for message in kafka_publisher.kafka_consumer:
+            #print message.value
+            #logging.debug("%s",message.value)
+            #logging.info("%s",message.value)
+            status = process_ceilometer_message(json.loads(message.value),message.value)
+            #print status
+    except Exception as e:
+        logging.error("Error in Kafka setup:%s ",e.__str__())
+
+def process_ceilometer_message(sample,data):
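+    # For each subscription: match sample['counter_name'] against the
+    # subscribed meter patterns (fnmatch), apply the optional query filters,
+    # then forward the raw payload over UDP or re-publish the sample to the
+    # subscriber's kafka target.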
+    logging.debug("%s",sample)
+    #logging.info("%s",sample)
+    if len(sub_info) < 1:
+        #print  "No subscription exists"
+        return
+    for obj in sub_info:
+         #import pdb;pdb.set_trace()
+         msg_list = []
+         if type(obj.subscription_info) is list:
+             for info in obj.subscription_info:
+                 msg_list.append(fnmatch.fnmatch(sample['counter_name'],info))
+         else :
+             msg_list.append(fnmatch.fnmatch(sample['counter_name'],obj.subscription_info))
+         try:
+             if reduce(operator.or_, msg_list):
+                 ''' 
+                 kafka_publisher = None
+                 if obj.scheme == "kafka" :
+		    parse_target=netutils.urlsplit(obj.target)
+	            try :
+		        kafka_publisher=kafka_broker.KafkaBrokerPublisher(parse_target)
+                    except Exception as e:
+                        logging.error("* Error in connecting kafka broker:%s",e.__str__())
+                       # return False
+                        continue 
+                 '''
+                 host = obj.ipaddress
+                 port = int(obj.portno)
+                 l=[]
+                 logging.debug("* -------------------------------------------------------")
+                 if obj.sub_info_filter is None:
+                     try:
+                         if obj.scheme == "udp" :
+                              #logging.debug("* Sending data without query over UDP for host:%s and port:%s",host,port)
+                              #logging.info("* Sending data without query over UDP for host:%s and port:%s",host,port)
+                              #udp = socket.socket(socket.AF_INET, # Internet
+                              #                     socket.SOCK_DGRAM) # UDP
+                              #udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 
+                              obj.udp.sendto(data,(host,port))
+                              #return True
+                              continue
+                         elif obj.scheme == "kafka" :
+                              #logging.debug("* Sending data over kafka for host:%s and port:%s and topec:%s",host,port,kafka_publisher.topic)
+                              #logging.info("* Sending data over kafka for host:%s and port:%s and topec:%s",host,port,kafka_publisher.topic)
+                              obj.kafka_publisher._send(sample)  
+                              #return True
+                              continue                                  
+                     except Exception as e:
+                          logging.error ("Unable to send sample over UDP/kafka for %s and %s,%s",host,port,e.__str__())
+                          ret_str = ("Unable to send sample over UDP for %s and %s,%s ")%(host,port,e.__str__())
+                          #return False
+                          continue 
+                 for i in range(len(obj.sub_info_filter)):
+                     if obj.sub_info_filter[i]['op'] in COMPARATORS:
+                          op = COMPARATORS[obj.sub_info_filter[i]['op']]
+                          #logging.debug("* obj.sub_info_filter[i]['value']:%s",obj.sub_info_filter[i]['value'])
+                          #logging.debug("* obj.sub_info_filter[i]['field']:%s",obj.sub_info_filter[i]['field'])
+                          l.append(op(obj.sub_info_filter[i]['value'],sample[obj.sub_info_filter[i]['field']]))
+                          #logging.info("* Logical and of Query %s",l)
+                     else:
+                          logging.info("* Not a valid operator ignoring app_id:%s",obj.app_id)
+                          l.append(False)
+                          #logging.info("* Logical and of Query %s",l)
+                 if reduce(operator.or_, l):
+                     try:
+                         if obj.scheme == "udp" :
+                              logging.debug("* Sending data over UDP for host:%s and port:%s",host,port)
+                              #udp = socket.socket(socket.AF_INET, # Internet
+                              #                    socket.SOCK_DGRAM) # UDP
+                              #udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+                              obj.udp.sendto(data,(host,port))
+                              #return True
+                              continue
+                         elif obj.scheme == "kafka" :
+                              logging.debug("* Sending data over kafka for host:%s and port:%s and topic:%s",host,port,obj.kafka_publisher.topic)
+                              obj.kafka_publisher._send(sample)
+                              #return True
+                              continue
+                     except Exception:
+                         logging.error ("Unable to send sample over UDP/Kafka for %s and %s ",host,port)
+                         ret_str = ("Unable to send sample over UDP/Kafka for %s and %s ")%(host,port)
+                         #return False
+                         continue   
+                 else:
+                     logging.debug("* No notification found with the given subscription")
+                     continue
+             else :
+                  logging.debug("* No matching subscription found for %s",sample['counter_name'])
+                  continue
+         except Exception as e:
+             logging.error("Key_Error:%s ",e.__str__())
+             ret_str = ("Key_Error:%s \n")%e.__str__()
+             #return False
+             continue
+
+def initialize(ceilometer_client):
+     logging.debug("Ceilometer client info:%s",ceilometer_client)
+     parse_target=netutils.urlsplit(ceilometer_client)
+     if not parse_target.netloc:
+        err_str = "Error:Invalid client format"
+        logging.error("* Invalid client format")
+        return err_str
+     if parse_target.scheme == "udp" :
+         host,port=netutils.parse_host_port(parse_target.netloc)
+         scheme = parse_target.scheme
+         app_ip = host
+         app_port = port
+         if host == None or port == None :
+             err_str = "* Error: Invalid IP Address format"
+             logging.error("* Invalid IP Address format")
+             return err_str
+         thread.start_new(read_notification_from_ceilometer_over_udp,(host,port,))
+     elif parse_target.scheme == "kafka" :
+         thread.start_new(read_notification_from_ceilometer_over_kafka,(parse_target,))
+     
+        
+if __name__ == "__main__":
+
+    try:
+        config = ConfigParser.ConfigParser()
+        config.read('pub_sub.conf')
+        webserver_host = config.get('WEB_SERVER','webserver_host')
+        webserver_port = int (config.get('WEB_SERVER','webserver_port'))
+       # client_host    = config.get('CLIENT','client_host')
+      #  client_port    = int (config.get('CLIENT','client_port'))
+        ceilometer_client_info = config.get('CLIENT','target')
+        '''  
+        log_level      = config.get('LOGGING','level')
+        log_file       = config.get('LOGGING','filename')
+        maxbytes       = int (config.get('LOGGING','maxbytes'))
+        backupcount    = int (config.get('LOGGING','backupcount'))
+        level = LEVELS.get(log_level, logging.NOTSET)
+        '''
+        logging.config.fileConfig('pub_sub.conf', disable_existing_loggers=False)
+        ''' 
+        logging.basicConfig(filename=log_file,format='%(asctime)s %(levelname)s %(message)s',\
+                    datefmt=_DEFAULT_LOG_DATE_FORMAT,level=level)
+
+        # create rotating file handler
+        
+        rfh = logging.handlers.RotatingFileHandler(
+                 log_file, encoding='utf8', maxBytes=maxbytes,
+                 backupCount=backupcount,delay=0)
+        logging.getLogger().addHandler(rfh)
+        '''
+         
+    except Exception as e:
+        print("* Error in config file:",e.__str__())
+        #logging.error("* Error in confing file:%s",e.__str__())
+    else: 
+        #initialize(client_host,client_port)
+        initialize(ceilometer_client_info)
+        app.run(host=webserver_host,port=webserver_port,debug=False)
diff --git a/xos/synchronizer/ceilometer/ceilometer_pub_sub/test/kafka_broker.py b/xos/synchronizer/ceilometer/ceilometer_pub_sub/test/kafka_broker.py
new file mode 100644
index 0000000..ce495fc
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_pub_sub/test/kafka_broker.py
@@ -0,0 +1,164 @@
+#
+# Copyright 2015 Cisco Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+import kafka
+from oslo_config import cfg
+from oslo_utils import netutils
+from six.moves.urllib import parse as urlparse
+import logging as LOG
+
+
+class KafkaBrokerPublisher():
+    def __init__(self, parsed_url):
+        self.kafka_client = None
+        self.kafka_server = None
+
+        self.host, self.port = netutils.parse_host_port(
+            parsed_url.netloc, default_port=9092)
+
+        self.local_queue = []
+
+        params = urlparse.parse_qs(parsed_url.query)
+        self.topic = params.get('topic', ['ceilometer'])[-1]
+        self.policy = params.get('policy', ['default'])[-1]
+        self.max_queue_length = int(params.get(
+            'max_queue_length', [1024])[-1])
+        self.max_retry = int(params.get('max_retry', [100])[-1])
+
+        if self.policy in ['default', 'drop', 'queue']:
+            LOG.info(('Publishing policy set to %s') % self.policy)
+        else:
+            LOG.warn(('Publishing policy is unknown (%s) force to default')
+                     % self.policy)
+            self.policy = 'default'
+
+        try:
+            self._get_client()
+            self._get_server()
+        except Exception as e:
+            LOG.exception("Failed to connect to Kafka service: %s", e)
+
+    def publish_samples(self, context, samples):
+        """Send a metering message for kafka broker.
+
+        :param context: Execution context from the service or RPC call
+        :param samples: Samples from pipeline after transformation
+        """
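+        # NOTE: meter_message_from_counter comes from ceilometer's publisher
+        # utils module (from ceilometer.publisher import utils), which is not
+        # imported in this standalone test copy; only the consumer and _send()
+        # paths are exercised by the pub/sub tests.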
+        samples_list = [
+            utils.meter_message_from_counter(
+                sample, cfg.CONF.publisher.telemetry_secret)
+            for sample in samples
+        ]
+
+        self.local_queue.append(samples_list)
+
+        try:
+            self._check_kafka_connection()
+        except Exception as e:
+            raise e
+
+        self.flush()
+
+    def flush(self):
+        queue = self.local_queue
+        self.local_queue = self._process_queue(queue)
+        if self.policy == 'queue':
+            self._check_queue_length()
+
+    def publish_events(self, context, events):
+        """Send an event message for kafka broker.
+
+        :param context: Execution context from the service or RPC call
+        :param events: events from pipeline after transformation
+        """
+        events_list = [utils.message_from_event(
+            event, cfg.CONF.publisher.telemetry_secret) for event in events]
+
+        self.local_queue.append(events_list)
+
+        try:
+            self._check_kafka_connection()
+        except Exception as e:
+            raise e
+
+        self.flush()
+
+    def _process_queue(self, queue):
+        current_retry = 0
+        while queue:
+            data = queue[0]
+            try:
+                self._send(data)
+            except Exception:
+                LOG.warn(("Failed to publish %d datum"),
+                         sum([len(d) for d in queue]))
+                if self.policy == 'queue':
+                    return queue
+                elif self.policy == 'drop':
+                    return []
+                current_retry += 1
+                if current_retry >= self.max_retry:
+                    self.local_queue = []
+                    LOG.exception(("Failed to retry to send sample data "
+                                      "with max_retry times"))
+                    raise
+            else:
+                queue.pop(0)
+        return []
+
+    def _check_queue_length(self):
+        queue_length = len(self.local_queue)
+        if queue_length > self.max_queue_length > 0:
+            diff = queue_length - self.max_queue_length
+            self.local_queue = self.local_queue[diff:]
+            LOG.warn(("Kafka Publisher max local queue length is exceeded, "
+                     "dropping %d oldest data") % diff)
+
+    def _check_kafka_connection(self):
+        try:
+            self._get_client()
+        except Exception as e:
+            LOG.exception("Failed to connect to Kafka service: %s", e)
+
+            if self.policy == 'queue':
+                self._check_queue_length()
+            else:
+                self.local_queue = []
+            raise Exception('Kafka Client is not available, '
+                            'please restart Kafka client')
+
+    def _get_client(self):
+        if not self.kafka_client:
+            self.kafka_client = kafka.KafkaClient(
+                "%s:%s" % (self.host, self.port))
+            self.kafka_producer = kafka.SimpleProducer(self.kafka_client)
+    
+    def _get_server(self):
+        if not self.kafka_server:
+            self.kafka_server = kafka.KafkaClient(
+                "%s:%s" % (self.host, self.port))
+            self.kafka_consumer = kafka.KafkaConsumer(
+                self.topic,
+                bootstrap_servers=["%s:%s" % (self.host, self.port)])
+
+
+    def _send(self, data):
+        try:
+            self.kafka_producer.send_messages(
+                self.topic, json.dumps(data))
+        except Exception as e:
+            LOG.exception("Failed to send sample data: %s", e)
+            raise
diff --git a/xos/synchronizer/ceilometer/ceilometer_pub_sub/test/kafka_client.py b/xos/synchronizer/ceilometer/ceilometer_pub_sub/test/kafka_client.py
new file mode 100644
index 0000000..4d7cff0
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_pub_sub/test/kafka_client.py
@@ -0,0 +1,20 @@
+import kafka
+import kafka_broker
+from oslo_utils import netutils
+import logging
+
+def read_notification_from_ceilometer_over_kafka(parse_target):
+    logging.info("Kafka target:%s",parse_target)
+    try :
+        kafka_publisher=kafka_broker.KafkaBrokerPublisher(parse_target)
+        for message in kafka_publisher.kafka_consumer:
+            #print message.value
+            logging.info("%s",message.value)
+            #print status
+    except Exception as e:
+        logging.error("Error in Kafka setup:%s ",e.__str__())
+
+ceilometer_client="kafka://10.11.10.1:9092?topic=test"
+logging.basicConfig(format='%(asctime)s %(filename)s %(levelname)s %(message)s',filename='kafka_client.log',level=logging.INFO)
+parse_target=netutils.urlsplit(ceilometer_client)
+read_notification_from_ceilometer_over_kafka(parse_target)
diff --git a/xos/synchronizer/ceilometer/ceilometer_pub_sub/test/udp_client_cpu.py b/xos/synchronizer/ceilometer/ceilometer_pub_sub/test/udp_client_cpu.py
new file mode 100644
index 0000000..1c30d63
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_pub_sub/test/udp_client_cpu.py
@@ -0,0 +1,22 @@
+import socket
+import msgpack
+from oslo_utils import units
+import logging
+UDP_IP = "10.11.10.1"
+UDP_PORT = 5006
+
+logging.basicConfig(format='%(asctime)s %(filename)s %(levelname)s %(message)s',filename='udp_client.log',level=logging.INFO)
+udp = socket.socket(socket.AF_INET, # Internet
+                     socket.SOCK_DGRAM) # UDP
+udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+udp.bind((UDP_IP, UDP_PORT))
+while True:
+     data, source = udp.recvfrom(64 * units.Ki)
+     #print data
+     #try:
+     sample = msgpack.loads(data, encoding='utf-8')
+     logging.info("%s",sample)
+     print sample
+     #except Exception:
+         #logging.info("%s",sample)
+     #    print ("UDP: Cannot decode data sent by %s"), source
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/README b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/README
new file mode 100644
index 0000000..76040e5
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/README
@@ -0,0 +1,50 @@
+# Readme to build custom image.
+VM Building:
+1.Download the Ubuntu 14.04 server image and create a VM using virt-manager (KVM hypervisor).
+Note: VirtualBox can also be used instead of virt-manager.
+2.Create a user "ubuntu" with sudo privileges.
+3.git clone https://gerrit.opencord.org/monitoring
+4.Go to /home/ubuntu/monitoring/xos/synchronizer/ceilometer/ceilometer_service_custom_image/startup-scripts
+  Edit i.STARTUP_PATH so that it points to the startup-scripts dir,
+       ii.the IP variable in ceilometer_init_script.sh so that keystone is reachable from outside the VM,
+       iii.PUB_SUB_PATH so that it points to the ceilometer_pub_sub dir.
+5.Go to /home/ubuntu/monitoring/xos/synchronizer/ceilometer/ceilometer_service_custom_image
+6.run install.sh
+  ./install.sh
+7.Running install.sh installs the ceilometer setup (keystone, mysql, rabbitmq) along with zookeeper and kafka.
+8.Go to /home/ubuntu/monitoring/xos/synchronizer/ceilometer/ceilometer-plugins
+   i.cp -rf network/ext_services/ /usr/lib/python2.7/dist-packages/ceilometer/network/
+   ii.Update entry_points.txt:
+       copy the contents of ceilometer_entry_points.txt to /usr/lib/python2.7/dist-packages/ceilometer-6.0.0.egg-info/entry_points.txt under [ceilometer.notification]
+9. Update /home/ubuntu/monitoring/xos/synchronizer/ceilometer/ceilometer_pub_sub/pub_sub.conf with the proper IP addresses, port numbers and rabbitmq credentials (see the example fragment at the end of this readme).
+10.Update /etc/ceilometer/pipeline.yaml with the required publisher info for the corresponding meters.
+   ex:
+    sources:
+    - name: meter_source
+      interval: 600
+      meters:
+          - "*"
+          - "!vsg"
+          - "!vsg.*"
+      sinks:
+          - meter_sink
+    sinks:
+    - name: meter_sink
+      transformers:
+      publishers:
+          - notifier://
+          - kafka://localhost:9092?topic=ceilometer
+  
+11.This setup creates the "ceilometer" kafka topic by default. If any other topic is needed,
+   create it before use with the following command:
+  /opt/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic <topic name>
+12.Reboot the system.
+13.After rebooting, check that the following services are running using the ps command:
+  i.ceilometer services.
+  ii.kafka and zookeeper services
+  iii.sub_main.py script for pub-sub.
+14.Shut down the VM.
+15.Copy the VM image to the tmp folder.
+16.Convert and compress the image using the following command:
+  $sudo qemu-img convert -O qcow2 -c ceilometer-trusty-server-multi-nic.qcow2 ceilometer-trusty-server-multi-nic.compressed.qcow2
+17.The compressed image can be run as an OpenStack VM for the ceilometer service (see the upload example at the end of this readme).
+18.--
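+
+Example pub_sub.conf fragment for step 9 (a minimal sketch only; the addresses and ports below
+are placeholders that must match your deployment, and the real file also carries the rabbitmq
+credentials and the Python logging sections read by logging.config.fileConfig):
+   [WEB_SERVER]
+   webserver_host = 0.0.0.0
+   webserver_port = 4455
+
+   [CLIENT]
+   # target may be a kafka:// or udp:// URL
+   target = kafka://localhost:9092?topic=ceilometer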
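+
+For step 17, the compressed image first has to be uploaded to glance before it can be booted;
+one possible command (the image name here is arbitrary):
+   openstack image create "ceilometer-service-trusty" --disk-format qcow2 \
+       --container-format bare --file ceilometer-trusty-server-multi-nic.compressed.qcow2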
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/install.sh b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/install.sh
new file mode 100755
index 0000000..e6bfde8
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/install.sh
@@ -0,0 +1,37 @@
+set -x
+echo $PWD
+export CMDIR=$PWD
+echo $CMDIR
+
+echo "-------------------------------------------------------------------"
+echo " --------------------Installing Ceilometer-------------------------"
+echo "-------------------------------------------------------------------"
+cd $CMDIR/mitaka-v2
+sh install_ansible.sh
+echo $'[local]\nlocalhost' | sudo tee --append /etc/ansible/hosts > /dev/null
+ansible-playbook -c local os_ceilometer_mitaka.yml 
+source admin-openrc.sh
+echo "-------------------------------------------------------------------"
+
+echo "-------------------------------------------------------------------"
+echo " --------------------Installing Kafka------------------------------"
+echo "-------------------------------------------------------------------"
+sudo apt-get -y install python-pip
+sudo pip install kafka
+sudo pip install flask
+cd $CMDIR/kafka-installer/
+./install_zookeeper_kafka.sh install
+cd $CMDIR 
+echo "-------------------------------------------------------------------"
+
+
+echo "-------------------------------------------------------------------"
+echo " --------------------Installing InitScript-------------------------"
+echo "-------------------------------------------------------------------"
+cd $CMDIR/startup-scripts
+echo " Installing startup script"
+sudo cp zxceilostartup.sh /etc/init.d
+sudo chmod a+x /etc/init.d/zxceilostartup.sh
+sudo update-rc.d zxceilostartup.sh defaults
+chmod 755 $CMDIR/startup-scripts/ceilometer_init_script.sh
+echo "-------------------------------------------------------------------"
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/kafka/kafka-server-start.sh b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/kafka/kafka-server-start.sh
new file mode 100644
index 0000000..87b31d5
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/kafka/kafka-server-start.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# -lt 1 ];
+then
+	echo "USAGE: $0 [-daemon] server.properties"
+	exit 1
+fi
+base_dir=$(dirname $0)
+export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
+export KAFKA_HEAP_OPTS="-Xms256m -Xmx256m"
+
+EXTRA_ARGS="-name kafkaServer -loggc"
+
+COMMAND=$1
+case $COMMAND in
+  -daemon)
+    EXTRA_ARGS="-daemon "$EXTRA_ARGS
+    shift
+    ;;
+  *)
+    ;;
+esac
+
+exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka $@
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/kafka/kafka.conf b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/kafka/kafka.conf
new file mode 100644
index 0000000..3008f0f
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/kafka/kafka.conf
@@ -0,0 +1,35 @@
+#
+# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+description "Kafka"
+
+start on runlevel [2345]
+stop on runlevel [!2345]
+
+respawn
+
+limit nofile 32768 32768
+
+# If zookeeper is running on this box also give it time to start up properly
+pre-start script
+    if [ -e /etc/init.d/zookeeper ]; then
+        /etc/init.d/zookeeper start || true
+    fi
+end script
+
+# Rather than using setuid/setgid sudo is used because the pre-start task must run as root
+exec sudo -Hu kafka -g kafka KAFKA_HEAP_OPTS="-Xmx128m" /opt/kafka/bin/kafka-server-start.sh /etc/kafka/server.properties
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/kafka/log4j.properties b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/kafka/log4j.properties
new file mode 100644
index 0000000..5d24e87
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/kafka/log4j.properties
@@ -0,0 +1,72 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+log4j.rootLogger=WARN, stdout
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.kafkaAppender=org.apache.log4j.RollingFileAppender
+log4j.appender.kafkaAppender.MaxFileSize=50MB
+log4j.appender.kafkaAppender.MaxBackupIndex=4
+log4j.appender.kafkaAppender.File=/var/log/kafka/server.log
+log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.stateChangeAppender=org.apache.log4j.RollingFileAppender
+log4j.appender.stateChangeAppender.MaxFileSize=50MB
+log4j.appender.stateChangeAppender.MaxBackupIndex=4
+log4j.appender.stateChangeAppender.File=/var/log/kafka/state-change.log
+log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.controllerAppender=org.apache.log4j.RollingFileAppender
+log4j.appender.controllerAppender.MaxFileSize=50MB
+log4j.appender.controllerAppender.MaxBackupIndex=4
+log4j.appender.controllerAppender.File=/var/log/kafka/controller.log
+log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+# Turn on all our debugging info
+#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender
+#log4j.logger.kafka.client.ClientUtils=DEBUG, kafkaAppender
+#log4j.logger.kafka.perf=DEBUG, kafkaAppender
+#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender
+#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG
+log4j.logger.kafka=WARN, kafkaAppender
+
+# Tracing requests results in large logs
+#log4j.appender.requestAppender=org.apache.log4j.RollingFileAppender
+#log4j.appender.requestAppender.MaxFileSize=50MB
+#log4j.appender.requestAppender.MaxBackupIndex=4
+#log4j.appender.requestAppender.File=/var/log/kafka/kafka-request.log
+#log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
+#log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+#
+#log4j.logger.kafka.network.RequestChannel$=TRACE, requestAppender
+#log4j.additivity.kafka.network.RequestChannel$=false
+#
+#log4j.logger.kafka.network.Processor=TRACE, requestAppender
+#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
+#log4j.additivity.kafka.server.KafkaApis=false
+#log4j.logger.kafka.request.logger=TRACE, requestAppender
+#log4j.additivity.kafka.request.logger=false
+
+log4j.logger.kafka.controller=TRACE, controllerAppender
+log4j.additivity.kafka.controller=false
+
+log4j.logger.state.change.logger=TRACE, stateChangeAppender
+log4j.additivity.state.change.logger=false
+
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/kafka/server.properties b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/kafka/server.properties
new file mode 100644
index 0000000..ec6b1b3
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/kafka/server.properties
@@ -0,0 +1,119 @@
+#
+# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+############################# Server Basics #############################
+
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id=0
+
+############################# Socket Server Settings #############################
+
+# The port the socket server listens on
+port=9092
+
+# Hostname the broker will bind to. If not set, the server will bind to all interfaces
+host.name=127.0.0.1
+
+# Hostname the broker will advertise to producers and consumers. If not set, it uses the
+# value for "host.name" if configured.  Otherwise, it will use the value returned from
+# java.net.InetAddress.getCanonicalHostName().
+#advertised.host.name=<hostname routable by clients>
+
+# The port to publish to ZooKeeper for clients to use. If this is not set,
+# it will publish the same port that the broker binds to.
+#advertised.port=<port accessible by clients>
+
+# The number of threads handling network requests
+num.network.threads=2
+
+# The number of threads doing disk I/O
+num.io.threads=2
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=1048576
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=1048576
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma separated list of directories under which to store log files
+log.dirs=/var/kafka
+
+auto.create.topics.enable=false
+# The number of logical partitions per topic per server. More partitions allow greater parallelism
+# for consumption, but also mean more files.
+num.partitions=2
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+#    1. Durability: Unflushed data may be lost if you are not using replication.
+#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion
+log.retention.hours=24
+
+# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
+# segments don't drop below log.retention.bytes.
+log.retention.bytes=104857600
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=104857600
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=60000
+
+# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
+# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
+log.cleaner.enable=false
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma separated host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect=127.0.0.1:2181
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=1000000
+
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/zookeeper/environment b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/zookeeper/environment
new file mode 100644
index 0000000..afa2d2f
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/zookeeper/environment
@@ -0,0 +1,36 @@
+#
+# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Modified from http://packages.ubuntu.com/saucy/zookeeperd
+NAME=zookeeper
+ZOOCFGDIR=/etc/zookeeper/conf
+
+# seems, that log4j requires the log4j.properties file to be in the classpath
+CLASSPATH="$ZOOCFGDIR:/usr/share/java/jline.jar:/usr/share/java/log4j-1.2.jar:/usr/share/java/xercesImpl.jar:/usr/share/java/xmlParserAPIs.jar:/usr/share/java/netty.jar:/usr/share/java/slf4j-api.jar:/usr/share/java/slf4j-log4j12.jar:/usr/share/java/zookeeper.jar"
+
+ZOOCFG="$ZOOCFGDIR/zoo.cfg"
+ZOO_LOG_DIR=/var/log/zookeeper
+USER=$NAME
+GROUP=$NAME
+PIDDIR=/var/run/$NAME
+PIDFILE=$PIDDIR/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+JAVA=/usr/bin/java
+ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
+ZOO_LOG4J_PROP="INFO,ROLLINGFILE"
+JMXLOCALONLY=false
+JAVA_OPTS=""
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/zookeeper/log4j.properties b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/zookeeper/log4j.properties
new file mode 100644
index 0000000..6c45a4a
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/zookeeper/log4j.properties
@@ -0,0 +1,69 @@
+#
+# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# From http://packages.ubuntu.com/saucy/zookeeperd
+
+# ZooKeeper Logging Configuration
+#
+
+# Format is "<default threshold> (, <appender>)+
+
+log4j.rootLogger=${zookeeper.root.logger}
+
+# Example: console appender only
+# log4j.rootLogger=INFO, CONSOLE
+
+# Example with rolling log file
+#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
+
+# Example with rolling log file and tracing
+#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
+
+#
+# Log INFO level and above messages to the console
+#
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.Threshold=INFO
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+#
+# Add ROLLINGFILE to rootLogger to get log file output
+#    Log DEBUG level and above messages to a log file
+log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
+log4j.appender.ROLLINGFILE.Threshold=WARN
+log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/zookeeper.log
+
+# Max log file size of 10MB
+log4j.appender.ROLLINGFILE.MaxFileSize=10MB
+# uncomment the next line to limit number of backup files
+#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
+
+log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+
+#
+# Add TRACEFILE to rootLogger to get log file output
+#    Log DEBUG level and above messages to a log file
+log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
+log4j.appender.TRACEFILE.Threshold=TRACE
+log4j.appender.TRACEFILE.File=${zookeeper.log.dir}/zookeeper_trace.log
+
+log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
+### Notice we are including log4j's NDC here (%x)
+log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/zookeeper/myid b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/zookeeper/myid
new file mode 100644
index 0000000..c227083
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/zookeeper/myid
@@ -0,0 +1 @@
+0
\ No newline at end of file
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/zookeeper/zoo.cfg b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/zookeeper/zoo.cfg
new file mode 100644
index 0000000..b8f5582
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/conf/zookeeper/zoo.cfg
@@ -0,0 +1,74 @@
+#
+# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html
+
+# The number of milliseconds of each tick
+tickTime=2000
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit=10
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit=5
+# the directory where the snapshot is stored.
+dataDir=/var/lib/zookeeper
+# Place the dataLogDir to a separate physical disc for better performance
+# dataLogDir=/disk2/zookeeper
+
+# the port at which the clients will connect
+clientPort=2181
+
+# Maximum number of clients that can connect from one client
+maxClientCnxns=60
+
+# specify all zookeeper servers
+# The first port is used by followers to connect to the leader
+# The second one is used for leader election
+
+server.0=127.0.0.1:2888:3888
+
+# To avoid seeks ZooKeeper allocates space in the transaction log file in
+# blocks of preAllocSize kilobytes. The default block size is 64M. One reason
+# for changing the size of the blocks is to reduce the block size if snapshots
+# are taken more often. (Also, see snapCount).
+#preAllocSize=65536
+
+# Clients can submit requests faster than ZooKeeper can process them,
+# especially if there are a lot of clients. To prevent ZooKeeper from running
+# out of memory due to queued requests, ZooKeeper will throttle clients so that
+# there is no more than globalOutstandingLimit outstanding requests in the
+# system. The default limit is 1,000.ZooKeeper logs transactions to a
+# transaction log. After snapCount transactions are written to a log file a
+# snapshot is started and a new transaction log file is started. The default
+# snapCount is 10,000.
+#snapCount=1000
+
+# If this option is defined, requests will be will logged to a trace file named
+# traceFile.year.month.day.
+#traceFile=
+
+# Leader accepts client connections. Default value is "yes". The leader machine
+# coordinates updates. For higher update throughput at thes slight expense of
+# read throughput the leader can be configured to not accept clients and focus
+# on coordination.
+#leaderServes=yes
+
+# Autopurge every hour to avoid using lots of disk in bursts
+# Order of the next 2 properties matters.
+# autopurge.snapRetainCount must be before autopurge.purgeInterval.
+autopurge.snapRetainCount=3
+autopurge.purgeInterval=1
\ No newline at end of file
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/install_zookeeper_kafka.sh b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/install_zookeeper_kafka.sh
new file mode 100755
index 0000000..f21a353
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/kafka-installer/install_zookeeper_kafka.sh
@@ -0,0 +1,174 @@
+#!/bin/bash
+set -x
+BASE_KAFKA_VERSION=0.9.0.0
+KAFKA_VERSION=2.11-0.9.0.0
+export CONF_BASE=$PWD
+echo $CONF_BASE
+sudo sed -i "s/.*127.0.0.1.*/127.0.0.1 localhost $(hostname)/" /etc/hosts
+
+function install_openjdk_7_jdk {
+
+    echo "Install   openjdk_7_jdk"
+
+    sudo apt-get -y install openjdk-7-jdk
+
+}
+
+function clean_openjdk_7_jdk {
+
+    echo "Clean   openjdk_7_jdk"
+
+    sudo apt-get -y purge openjdk-7-jdk
+
+    sudo apt-get -y autoremove
+
+}
+
+function install_zookeeper {
+
+    echo "Install   Zookeeper"
+
+    sudo apt-get -y install zookeeperd
+
+    sudo cp "${CONF_BASE}"/conf/zookeeper/zoo.cfg /etc/zookeeper/conf/zoo.cfg
+
+    if [[ ${SERVICE_HOST} ]]; then
+
+        sudo sed -i "s/server\.0=127\.0\.0\.1/server.0=${SERVICE_HOST}/g" /etc/zookeeper/conf/zoo.cfg
+
+    fi
+
+    sudo cp "${CONF_BASE}"/conf/zookeeper/myid /etc/zookeeper/conf/myid
+
+    sudo cp "${CONF_BASE}"/conf/zookeeper/environment /etc/zookeeper/conf/environment
+
+    sudo mkdir -p /var/log/zookeeper || true
+
+    sudo chmod 755 /var/log/zookeeper
+
+    sudo cp "${CONF_BASE}"/conf/zookeeper/log4j.properties /etc/zookeeper/conf/log4j.properties
+
+    sudo start zookeeper || sudo restart zookeeper
+}
+
+function clean_zookeeper {
+
+    echo "Clean   Zookeeper"
+
+    sudo stop zookeeper || true
+
+    sudo apt-get -y purge zookeeperd
+
+    sudo apt-get -y purge zookeeper
+
+    sudo rm -rf /etc/zookeeper
+
+    sudo rm -rf  /var/log/zookeeper
+
+    sudo rm -rf /var/lib/zookeeper
+}
+
+function install_kafka {
+
+    echo "Install   Kafka"
+
+    if [[ "$OFFLINE" != "True" ]]; then
+        sudo curl http://apache.mirrors.tds.net/kafka/${BASE_KAFKA_VERSION}/kafka_${KAFKA_VERSION}.tgz \
+            -o /root/kafka_${KAFKA_VERSION}.tgz
+    fi
+
+    sudo groupadd --system kafka || true
+
+    sudo useradd --system -g kafka kafka || true
+
+    sudo tar -xzf /root/kafka_${KAFKA_VERSION}.tgz -C /opt
+
+    sudo ln -sf /opt/kafka_${KAFKA_VERSION} /opt/kafka
+
+    sudo cp -f "${CONF_BASE}"/conf/kafka/kafka-server-start.sh /opt/kafka_${KAFKA_VERSION}/bin/kafka-server-start.sh
+
+    sudo cp -f "${CONF_BASE}"/conf/kafka/kafka.conf /etc/init/kafka.conf
+
+    sudo chown root:root /etc/init/kafka.conf
+
+    sudo chmod 644 /etc/init/kafka.conf
+
+    sudo mkdir -p /var/kafka || true
+
+    sudo chown kafka:kafka /var/kafka
+
+    sudo chmod 755 /var/kafka
+
+    sudo rm -rf /var/kafka/lost+found
+
+    sudo mkdir -p /var/log/kafka || true
+
+    sudo chown kafka:kafka /var/log/kafka
+
+    sudo chmod 755 /var/log/kafka
+
+    sudo ln -sf /opt/kafka/config /etc/kafka
+
+    sudo cp -f "${CONF_BASE}"/conf/kafka/log4j.properties /etc/kafka/log4j.properties
+
+    sudo chown kafka:kafka /etc/kafka/log4j.properties
+
+    sudo chmod 644 /etc/kafka/log4j.properties
+
+    sudo cp -f "${CONF_BASE}"/conf/kafka/server.properties /etc/kafka/server.properties
+
+    sudo chown kafka:kafka /etc/kafka/server.properties
+
+    sudo chmod 644 /etc/kafka/server.properties
+
+    if [[ ${SERVICE_HOST} ]]; then
+
+        sudo sed -i "s/host\.name=127\.0\.0\.1/host.name=${SERVICE_HOST}/g" /etc/kafka/server.properties
+        sudo sed -i "s/zookeeper\.connect=127\.0\.0\.1:2181/zookeeper.connect=${SERVICE_HOST}:2181/g" /etc/kafka/server.properties
+
+    fi
+
+    sudo start kafka || sudo restart kafka
+}
+
+function clean_kafka {
+
+    echo "Clean   Kafka"
+ 
+    sudo stop kafka || true
+
+    sudo rm -rf /var/kafka
+
+    sudo rm -rf /var/log/kafka
+
+    sudo rm -rf /etc/kafka
+
+    sudo rm -rf /opt/kafka
+
+    sudo rm -rf /etc/init/kafka.conf
+
+    sudo userdel kafka
+
+    sudo groupdel kafka
+
+    sudo rm -rf /opt/kafka_${KAFKA_VERSION}
+
+    sudo rm -rf /root/kafka_${KAFKA_VERSION}.tgz
+
+}
+
+if [ "$1" == "install" ] ; then
+   install_openjdk_7_jdk
+   install_zookeeper
+   install_kafka
+   echo "Waiting for kafka and zookeeper to come up"
+   sleep 5
+   /opt/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic ceilometer
+fi
+
+if [ "$1" == "clean" ] ; then
+   clean_kafka
+   clean_zookeeper
+   clean_openjdk_7_jdk
+fi
+
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/README.md b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/README.md
new file mode 100644
index 0000000..7b5157f
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/README.md
@@ -0,0 +1,21 @@
+# ceilometer-ansible-mitaka
+
+1.Adding the ceilometer user in mongodb via the mongodb ansible module throws an error; this still needs to be fixed.
+  A workaround using a script is added.
+2.The mongodb connection in the mitaka ceilometer.conf has an error.
+  
+  [database]
+...
+connection = mongodb://ceilometer:CEILOMETER_DBPASS@controller:27017/ceilometer
+
+ The above connection is not working, so it is replaced with the lines below:
+ metering_connection = mongodb://localhost:27017/ceilometer
+ event_connection = mongodb://localhost:27017/ceilometer
+
+3.# mongo --host controller --eval '
+  db = db.getSiblingDB("ceilometer");
+  db.createUser({user: "ceilometer",
+  pwd: "CEILOMETER_DBPASS",
+  roles: [ "readWrite", "dbAdmin" ]})'
+
+  The createUser command above has some issues, so it is replaced with addUser (see point 1; done via a script).
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/admin-openrc.sh b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/admin-openrc.sh
new file mode 100644
index 0000000..9bbbc87
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/admin-openrc.sh
@@ -0,0 +1,8 @@
+unset OS_PROJECT_DOMAIN_ID
+unset OS_USER_DOMAIN_ID
+export OS_PROJECT_NAME=admin
+export OS_TENANT_NAME=admin
+export OS_USERNAME=admin
+export OS_PASSWORD=password
+export OS_AUTH_URL=http://localhost:35357/v2.0
+ceilometer meter-list
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/configure_users_mitaka.sh b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/configure_users_mitaka.sh
new file mode 100644
index 0000000..b59a0c2
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/configure_users_mitaka.sh
@@ -0,0 +1,38 @@
+set -x
+export OS_TOKEN=ADMIN_TOKEN
+export OS_URL=http://localhost:35357/v2.0
+#Deleting services:
+for i in $(openstack service list -f table -c ID); do openstack service delete $i; done
+for i in $(openstack user list -f table -c ID); do openstack user delete $i; done
+for i in $(openstack role list -f table -c ID); do openstack role delete $i; done
+for i in $(openstack project list -f table -c ID); do openstack project delete $i; done
+openstack service create --name keystone --description "OpenStack Identity" identity
+openstack endpoint create \
+  --publicurl http://localhost:5000/v2.0 \
+  --internalurl http://localhost:5000/v2.0 \
+  --adminurl http://localhost:35357/v2.0 \
+  --region RegionOne \
+  identity
+openstack project create --description "Admin Project" admin
+openstack user create admin --password password --email admin@cord.com
+openstack role create admin
+openstack role add --project admin --user admin admin
+openstack project create --description "Service Project" service
+openstack project create --description "Demo Project" demo
+openstack user create demo --password password --email demo@cord.com
+openstack role create user
+openstack role add --project demo --user demo user
+openstack user create ceilometer --password password --email ceilometer@cord.com
+openstack role add --project service --user ceilometer admin
+openstack service create --name ceilometer --description "Telemetry" metering
+openstack endpoint create \
+  --publicurl http://localhost:8777 \
+  --internalurl http://localhost:8777 \
+  --adminurl http://localhost:8777 \
+  --region RegionOne \
+  metering
+openstack user list
+openstack role list
+openstack project list
+openstack service list
+openstack endpoint list
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/install_ansible.sh b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/install_ansible.sh
new file mode 100644
index 0000000..40dcdd0
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/install_ansible.sh
@@ -0,0 +1,7 @@
+sudo apt-get update
+sudo apt-get -y install software-properties-common git mosh tmux dnsutils python-netaddr
+sudo add-apt-repository -y ppa:ansible/ansible
+sudo apt-get update
+sudo apt-get install -y ansible
+[ -e ~/.ssh/id_rsa ] || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
+cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/mongo_user.sh b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/mongo_user.sh
new file mode 100644
index 0000000..54f632e
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/mongo_user.sh
@@ -0,0 +1,6 @@
+mongo --host localhost --eval '
+  db = db.getSiblingDB("ceilometer");
+  db.addUser({user: "ceilometer",
+  pwd: "password",
+  roles: [ "readWrite", "dbAdmin" ]})'
+
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/os_ceilometer_mitaka.yml b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/os_ceilometer_mitaka.yml
new file mode 100644
index 0000000..4ed2da2
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/os_ceilometer_mitaka.yml
@@ -0,0 +1,180 @@
+---
+- name: Install Standalone ceilometer
+  hosts: local
+  vars:
+    mysql_root_password: "password"
+  sudo: yes
+  tasks:
+  # Adding cloud repo and update,upgrade
+  - name: install repository
+    apt: name=software-properties-common state=present
+  - name: Adding package to repository list
+    shell: add-apt-repository cloud-archive:mitaka
+  - name: apt update
+    apt: update_cache=yes
+  - name: apt dist-upgrade
+    apt: upgrade=dist
+  - name: installing openstack clients
+    apt: name=python-openstackclient state=present 
+ #Installing Mysql Service
+  - name: Install Mysql Service
+    apt: name={{ item }} state=installed update_cache=yes
+    with_items:
+      - mariadb-server
+      - python-pymysql
+      - python-mysqldb
+
+  - name: Copy cnf file 
+    template: src=openstack.cnf.j2 dest=/etc/mysql/conf.d/openstack.cnf owner=root group=root mode=0644 
+  - name: Start the MySQL service
+    service: name=mysql state=restarted enabled=true
+
+  # Mysql secure installation
+  # Note: Please comment out this section if this playbook is not being executed for the first time (fix me)
+  - name: delete anonymous MySQL server user for localhost
+    action: mysql_user user="" host="localhost" state="absent"
+  - name: delete anonymous MySQL server user
+    action: mysql_user user="" state="absent"
+  - name: remove the MySQL test database
+    action: mysql_db db=test state=absent
+
+  #Updating root permissions
+  - name: update mysql root password for all root accounts
+    sudo: yes
+    mysql_user: 
+      name: root 
+      host: "{{ item }}" 
+      login_user: root
+      #password: "{{ mysql_root_password }}"
+      password: "password"
+      login_password: "{{ mysql_root_password }}"
+      check_implicit_admin: yes
+      priv: "*.*:ALL,GRANT"
+    with_items:
+      - "{{ ansible_hostname }}"
+      - 127.0.0.1
+      - ::1
+      - localhost
+  #Installing rabbitmq service
+  - name: rabbitmq-server
+    apt: name=rabbitmq-server state=present
+  - name : Adding Rabbitmq user
+    shell : rabbitmqctl add_user openstack "password";rabbitmqctl set_permissions openstack ".*" ".*" ".*"
+
+  # Installing Keystone Service
+  - name: Creating keystone.override file 
+    template: src=keystone.override.j2 dest=/etc/init/keystone.override owner=root group=root mode=0644 
+
+  - name: Install Keystone
+    apt: name={{ item }} state=installed update_cache=yes
+    with_items:
+      - keystone 
+      - apache2 
+      - libapache2-mod-wsgi 	
+
+    # installing memcached
+  - name : installing memcached service
+    apt: name={{ item }}  state=present   
+    with_items:
+      - memcached
+      - python-memcache
+  - name: reStart the memcached service
+    service: name=memcached state=restarted enabled=true	  
+    #editing memcache conf file
+  - name: Adding new line
+    lineinfile: dest=/etc/memcached.conf line="-l 127.0.0.1"
+  - name: Restart memcached service
+    service: name=memcached state=restarted enabled=true  	
+  - name: Keystone create DB for service
+    mysql_db:
+      login_user: "root"
+      login_password: "password"
+      login_host: "localhost"
+      name: "keystone"
+      state: "present"
+  - name: Keystone grant access to the DB for the service
+    mysql_user:
+      login_user: "root"
+      login_password: "password"
+      login_host: "localhost"
+      name: "keystone"
+      password: "password"
+      host: "{{ item }}"
+      state: "present"
+      priv: "keystone.*:ALL"
+    with_items:
+       - "localhost"
+       - "%"
+ # Installing Keystone Service
+  - name: Creating keystone.override file 
+    template: src=keystone.override.j2 dest=/etc/init/keystone.override owner=root group=root mode=0644 
+
+  - name: Install Keystone
+    apt: name={{ item }} state=installed update_cache=yes
+    with_items:
+      - keystone 
+      - apache2 
+      - libapache2-mod-wsgi 
+  - name: Creating keystone.conf  
+    template: src=keystone.conf.j2 dest=/etc/keystone/keystone.conf owner=root group=root mode=0644
+  - name: Running sync database
+    shell: /bin/sh -c "keystone-manage db_sync" keystone
+  - name: initializing fernet keystone
+    shell: keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone  
+  # Configuring apache server
+  - name: Adding new line
+    lineinfile: dest=/etc/apache2/apache2.conf line="ServerName localhost"
+
+  - name: Creating wsgi-keystone.conf  
+    template: src=wsgi-keystone.conf.j2 dest=/etc/apache2/sites-available/wsgi-keystone.conf owner=root group=root mode=0644
+  - name : Configuring apache2
+    shell:  ln -s /etc/apache2/sites-available/wsgi-keystone.conf /etc/apache2/sites-enabled
+    ignore_errors: yes
+  - name: Restart apache2 service
+    service: name=apache2 state=restarted enabled=true	
+  - name : removing previous databases
+    shell: rm -f /var/lib/keystone/keystone.db
+ #Installing mongod db server
+  - name: Install the Mongo db server
+    apt: name={{ item }} state=installed update_cache=yes
+    with_items:
+      - mongodb-server
+      - mongodb-clients
+      - python-pymongo
+  - name: Creating mongodb.conf
+    template: src=mongodb.conf.j2 dest=/etc/mongodb.conf owner=root group=root mode=0644
+  - name: Stopping mongodb service
+    service: name=mongodb state=stopped
+  - name: Removing mongodb files
+    shell: rm -rf /var/lib/mongodb/journal/prealloc.*
+  - name: starting mongodb service
+    service: name=mongodb state=started
+  - name : Configuring users
+    script: configure_users_mitaka.sh
+  - name: Adding ceilometer database
+    script: mongo_user.sh
+    #mongodb_user: database=ceilometer name=ceilometer password=password roles='readWrite,userAdmin' state=present	
+  #Installing Ceilometer Services
+  - name : Install Ceilometer services
+    apt: name={{ item }} state=installed update_cache=yes
+    with_items:
+      - ceilometer-api
+      - ceilometer-collector
+      - ceilometer-agent-central
+      - ceilometer-agent-notification
+      - python-ceilometerclient
+ 
+  - name: Creating Ceilometer.conf
+    template: src=ceilometer.conf.j2 dest=/etc/ceilometer/ceilometer.conf owner=root group=root mode=0644
+
+  - name: Restarting ceilometer-agent-central
+    service: name=ceilometer-agent-central state=restarted
+
+  - name: Restarting ceilometer-agent-notification
+    service: name=ceilometer-agent-notification state=restarted
+
+  - name: Restarting ceilometer-api
+    service: name=ceilometer-api state=restarted
+
+  - name: Restarting ceilometer-collector
+    service: name=ceilometer-collector state=restarted
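With ceilometer-api, ceilometer-collector and the agents restarted, a quick smoke test helps confirm the stack actually came up. A minimal verification sketch in the same Ansible style (not part of the diff; it assumes the default ceilometer-api port 8777 and the localhost Keystone endpoints configured in this playbook):

  # Hypothetical follow-up tasks, shown only as a sketch.
  - name: Wait for the Ceilometer API to listen on its default port
    wait_for: host=127.0.0.1 port=8777 timeout=60
  - name: Wait for the Keystone public endpoint
    wait_for: host=127.0.0.1 port=5000 timeout=60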
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/apache2.conf.j2 b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/apache2.conf.j2
new file mode 100644
index 0000000..52c0eeb
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/apache2.conf.j2
@@ -0,0 +1,222 @@
+# This is the main Apache server configuration file.  It contains the
+# configuration directives that give the server its instructions.
+# See http://httpd.apache.org/docs/2.4/ for detailed information about
+# the directives and /usr/share/doc/apache2/README.Debian about Debian specific
+# hints.
+#
+#
+# Summary of how the Apache 2 configuration works in Debian:
+# The Apache 2 web server configuration in Debian is quite different to
+# upstream's suggested way to configure the web server. This is because Debian's
+# default Apache2 installation attempts to make adding and removing modules,
+# virtual hosts, and extra configuration directives as flexible as possible, in
+# order to make automating the changes and administering the server as easy as
+# possible.
+
+# It is split into several files forming the configuration hierarchy outlined
+# below, all located in the /etc/apache2/ directory:
+#
+#	/etc/apache2/
+#	|-- apache2.conf
+#	|	`--  ports.conf
+#	|-- mods-enabled
+#	|	|-- *.load
+#	|	`-- *.conf
+#	|-- conf-enabled
+#	|	`-- *.conf
+# 	`-- sites-enabled
+#	 	`-- *.conf
+#
+#
+# * apache2.conf is the main configuration file (this file). It puts the pieces
+#   together by including all remaining configuration files when starting up the
+#   web server.
+#
+# * ports.conf is always included from the main configuration file. It is
+#   supposed to determine listening ports for incoming connections which can be
+#   customized anytime.
+#
+# * Configuration files in the mods-enabled/, conf-enabled/ and sites-enabled/
+#   directories contain particular configuration snippets which manage modules,
+#   global configuration fragments, or virtual host configurations,
+#   respectively.
+#
+#   They are activated by symlinking available configuration files from their
+#   respective *-available/ counterparts. These should be managed by using our
+#   helpers a2enmod/a2dismod, a2ensite/a2dissite and a2enconf/a2disconf. See
+#   their respective man pages for detailed information.
+#
+# * The binary is called apache2. Due to the use of environment variables, in
+#   the default configuration, apache2 needs to be started/stopped with
+#   /etc/init.d/apache2 or apache2ctl. Calling /usr/bin/apache2 directly will not
+#   work with the default configuration.
+
+
+# Global configuration
+#
+
+#
+# ServerRoot: The top of the directory tree under which the server's
+# configuration, error, and log files are kept.
+#
+# NOTE!  If you intend to place this on an NFS (or otherwise network)
+# mounted filesystem then please read the Mutex documentation (available
+# at <URL:http://httpd.apache.org/docs/2.4/mod/core.html#mutex>);
+# you will save yourself a lot of trouble.
+#
+# Do NOT add a slash at the end of the directory path.
+#
+#ServerRoot "/etc/apache2"
+
+#
+# The accept serialization lock file MUST BE STORED ON A LOCAL DISK.
+#
+Mutex file:${APACHE_LOCK_DIR} default
+
+#
+# PidFile: The file in which the server should record its process
+# identification number when it starts.
+# This needs to be set in /etc/apache2/envvars
+#
+PidFile ${APACHE_PID_FILE}
+
+#
+# Timeout: The number of seconds before receives and sends time out.
+#
+Timeout 300
+
+#
+# KeepAlive: Whether or not to allow persistent connections (more than
+# one request per connection). Set to "Off" to deactivate.
+#
+KeepAlive On
+
+#
+# MaxKeepAliveRequests: The maximum number of requests to allow
+# during a persistent connection. Set to 0 to allow an unlimited amount.
+# We recommend you leave this number high, for maximum performance.
+#
+MaxKeepAliveRequests 100
+
+#
+# KeepAliveTimeout: Number of seconds to wait for the next request from the
+# same client on the same connection.
+#
+KeepAliveTimeout 5
+
+
+# These need to be set in /etc/apache2/envvars
+User ${APACHE_RUN_USER}
+Group ${APACHE_RUN_GROUP}
+
+#
+# HostnameLookups: Log the names of clients or just their IP addresses
+# e.g., www.apache.org (on) or 204.62.129.132 (off).
+# The default is off because it'd be overall better for the net if people
+# had to knowingly turn this feature on, since enabling it means that
+# each client request will result in AT LEAST one lookup request to the
+# nameserver.
+#
+HostnameLookups Off
+
+# ErrorLog: The location of the error log file.
+# If you do not specify an ErrorLog directive within a <VirtualHost>
+# container, error messages relating to that virtual host will be
+# logged here.  If you *do* define an error logfile for a <VirtualHost>
+# container, that host's errors will be logged there and not here.
+#
+ErrorLog ${APACHE_LOG_DIR}/error.log
+
+#
+# LogLevel: Control the severity of messages logged to the error_log.
+# Available values: trace8, ..., trace1, debug, info, notice, warn,
+# error, crit, alert, emerg.
+# It is also possible to configure the log level for particular modules, e.g.
+# "LogLevel info ssl:warn"
+#
+LogLevel warn
+
+# Include module configuration:
+IncludeOptional mods-enabled/*.load
+IncludeOptional mods-enabled/*.conf
+
+# Include list of ports to listen on
+Include ports.conf
+
+
+# Sets the default security model of the Apache2 HTTPD server. It does
+# not allow access to the root filesystem outside of /usr/share and /var/www.
+# The former is used by web applications packaged in Debian,
+# the latter may be used for local directories served by the web server. If
+# your system is serving content from a sub-directory in /srv you must allow
+# access here, or in any related virtual host.
+<Directory />
+	Options FollowSymLinks
+	AllowOverride None
+	Require all denied
+</Directory>
+
+<Directory /usr/share>
+	AllowOverride None
+	Require all granted
+</Directory>
+
+<Directory /var/www/>
+	Options Indexes FollowSymLinks
+	AllowOverride None
+	Require all granted
+</Directory>
+
+#<Directory /srv/>
+#	Options Indexes FollowSymLinks
+#	AllowOverride None
+#	Require all granted
+#</Directory>
+
+
+
+
+# AccessFileName: The name of the file to look for in each directory
+# for additional configuration directives.  See also the AllowOverride
+# directive.
+#
+AccessFileName .htaccess
+
+#
+# The following lines prevent .htaccess and .htpasswd files from being
+# viewed by Web clients.
+#
+<FilesMatch "^\.ht">
+	Require all denied
+</FilesMatch>
+
+
+#
+# The following directives define some format nicknames for use with
+# a CustomLog directive.
+#
+# These deviate from the Common Log Format definitions in that they use %O
+# (the actual bytes sent including headers) instead of %b (the size of the
+# requested file), because the latter makes it impossible to detect partial
+# requests.
+#
+# Note that the use of %{X-Forwarded-For}i instead of %h is not recommended.
+# Use mod_remoteip instead.
+#
+LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined
+LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined
+LogFormat "%h %l %u %t \"%r\" %>s %O" common
+LogFormat "%{Referer}i -> %U" referer
+LogFormat "%{User-agent}i" agent
+
+# Include of directories ignores editors' and dpkg's backup files,
+# see README.Debian for details.
+
+# Include generic snippets of statements
+IncludeOptional conf-enabled/*.conf
+
+# Include the virtual host configurations:
+IncludeOptional sites-enabled/*.conf
+
+# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
+ServerName localhost 
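As the stock Debian comments above describe, virtual hosts are enabled by symlinking files from sites-available/ into sites-enabled/, ideally through the a2ensite/a2dissite helpers. The playbook earlier in this change creates that symlink with a bare "ln -s"; a hedged, idempotent alternative in the same style would be (a sketch only, not part of the diff):

  - name: Enabling the wsgi-keystone site via a2ensite
    command: a2ensite wsgi-keystone creates=/etc/apache2/sites-enabled/wsgi-keystone.conf
  - name: Restart apache2 service
    service: name=apache2 state=restarted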
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/ceilometer.conf.j2 b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/ceilometer.conf.j2
new file mode 100644
index 0000000..b89a3d9
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/ceilometer.conf.j2
@@ -0,0 +1,960 @@
+[DEFAULT]
+rpc_backend = rabbit
+auth_strategy = keystone
+debug = True
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+#
+# From oslo.messaging
+#
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+#rpc_conn_pool_size = 30
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address. (string value)
+#rpc_zmq_bind_address = *
+
+# MatchMaker driver. (string value)
+# Allowed values: redis, dummy
+#rpc_zmq_matchmaker = redis
+
+# Type of concurrency used. Either "native" or "eventlet" (string value)
+#rpc_zmq_concurrency = eventlet
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts = 1
+
+# Maximum number of ingress messages to locally buffer per topic. Default is
+# unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir = /var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match
+# "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
+
+# Seconds to wait before a cast expires (TTL). The default value of -1
+# specifies an infinite linger period. The value of 0 specifies no linger
+# period. Pending messages shall be discarded immediately when the socket is
+# closed. Only supported by impl_zmq. (integer value)
+#rpc_cast_timeout = -1
+
+# The default number of seconds that poll should wait. Poll raises timeout
+# exception when timeout expired. (integer value)
+#rpc_poll_timeout = 1
+
+# Expiration timeout in seconds of a name service record about existing target
+# ( < 0 means no timeout). (integer value)
+#zmq_target_expire = 120
+
+# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean
+# value)
+#use_pub_sub = true
+
+# Minimal port number for random ports range. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#rpc_zmq_min_port = 49152
+
+# Maximal port number for random ports range. (integer value)
+# Minimum value: 1
+# Maximum value: 65536
+#rpc_zmq_max_port = 65536
+
+# Number of retries to find free port number before fail with ZMQBindError.
+# (integer value)
+#rpc_zmq_bind_port_retries = 100
+
+# Size of executor thread pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
+#executor_thread_pool_size = 64
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout = 60
+
+# A URL representing the messaging driver to use and its full configuration. If
+# not set, we fall back to the rpc_backend option and driver specific
+# configuration. (string value)
+#transport_url = <None>
+
+# The messaging driver to use, defaults to rabbit. Other drivers include amqp
+# and zmq. (string value)
+#rpc_backend = rabbit
+
+# The default exchange under which topics are scoped. May be overridden by an
+# exchange name specified in the transport_url option. (string value)
+#control_exchange = openstack
+
+#
+# From oslo.service.service
+#
+
+# Enable eventlet backdoor.  Acceptable values are 0, <port>, and
+# <start>:<end>, where 0 results in listening on a random tcp port number;
+# <port> results in listening on the specified port number (and not enabling
+# backdoor if that port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range of port numbers.
+# The chosen port is displayed in the service's log file. (string value)
+#backdoor_port = <None>
+
+# Enables or disables logging values of all registered options when starting a
+# service (at DEBUG level). (boolean value)
+#log_options = true
+
+# Specify a timeout after which a gracefully shutdown server will exit. Zero
+# value means endless wait. (integer value)
+#graceful_shutdown_timeout = 60
+
+
+[cors]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# requests "origin" header. (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,POST,PUT,DELETE,OPTIONS
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+
+
+[cors.subdomain]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# requests "origin" header. (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,POST,PUT,DELETE,OPTIONS
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+
+
+[database]
+#connection = mongodb://ceilometer:password@localhost:27017/ceilometer
+metering_connection = mongodb://localhost:27017/ceilometer
+event_connection = mongodb://localhost:27017/ceilometer
+
+#
+# From oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+# Deprecated group/name - [DEFAULT]/sqlite_db
+#sqlite_db = oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database. (string
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+
+# The SQLAlchemy connection string to use to connect to the slave database.
+# (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including the
+# default, overrides any server-set SQL mode. To use whatever SQL mode is set
+# by the server configuration, set this to no value. Example: mysql_sql_mode=
+# (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum number of database connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection lost.
+# (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database operation up to
+# db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries of a
+# database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before error is
+# raised. Set to -1 to specify an infinite retry count. (integer value)
+#db_max_retries = 20
+
+
+[keystone_authtoken]
+auth_uri = http://localhost:5000
+auth_url = http://localhost:35357
+memcached_servers = localhost:11211
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = ceilometer
+password = password
+
+#
+# From keystonemiddleware.auth_token
+#
+
+# Complete public Identity API endpoint. (string value)
+#auth_uri = <None>
+
+# API version of the admin Identity API endpoint. (string value)
+#auth_version = <None>
+
+# Do not handle authorization requests within the middleware, but delegate the
+# authorization decision to downstream WSGI components. (boolean value)
+#delay_auth_decision = false
+
+# Request timeout value for communicating with Identity API server. (integer
+# value)
+#http_connect_timeout = <None>
+
+# How many times are we trying to reconnect when communicating with Identity
+# API Server. (integer value)
+#http_request_max_retries = 3
+
+# Env key for the swift cache. (string value)
+#cache = <None>
+
+# Required if identity server requires client certificate (string value)
+#certfile = <None>
+
+# Required if identity server requires client certificate (string value)
+#keyfile = <None>
+
+# A PEM encoded Certificate Authority to use when verifying HTTPs connections.
+# Defaults to system CAs. (string value)
+#cafile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# The region in which the identity server can be found. (string value)
+#region_name = <None>
+
+# Directory used to cache files related to PKI tokens. (string value)
+#signing_dir = <None>
+
+# Optionally specify a list of memcached server(s) to use for caching. If left
+# undefined, tokens will instead be cached in-process. (list value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers = <None>
+
+# In order to prevent excessive effort spent validating tokens, the middleware
+# caches previously-seen tokens for a configurable duration (in seconds). Set
+# to -1 to disable caching completely. (integer value)
+#token_cache_time = 300
+
+# Determines the frequency at which the list of revoked tokens is retrieved
+# from the Identity service (in seconds). A high number of revocation events
+# combined with a low cache duration may significantly reduce performance.
+# (integer value)
+#revocation_cache_time = 10
+
+# (Optional) If defined, indicate whether token data should be authenticated or
+# authenticated and encrypted. If MAC, token data is authenticated (with HMAC)
+# in the cache. If ENCRYPT, token data is encrypted and authenticated in the
+# cache. If the value is not one of these options or empty, auth_token will
+# raise an exception on initialization. (string value)
+# Allowed values: None, MAC, ENCRYPT
+#memcache_security_strategy = None
+
+# (Optional, mandatory if memcache_security_strategy is defined) This string is
+# used for key derivation. (string value)
+#memcache_secret_key = <None>
+
+# (Optional) Number of seconds memcached server is considered dead before it is
+# tried again. (integer value)
+#memcache_pool_dead_retry = 300
+
+# (Optional) Maximum total number of open connections to every memcached
+# server. (integer value)
+#memcache_pool_maxsize = 10
+
+# (Optional) Socket timeout in seconds for communicating with a memcached
+# server. (integer value)
+#memcache_pool_socket_timeout = 3
+
+# (Optional) Number of seconds a connection to memcached is held unused in the
+# pool before it is closed. (integer value)
+#memcache_pool_unused_timeout = 60
+
+# (Optional) Number of seconds that an operation will wait to get a memcached
+# client connection from the pool. (integer value)
+#memcache_pool_conn_get_timeout = 10
+
+# (Optional) Use the advanced (eventlet safe) memcached client pool. The
+# advanced pool will only work under python 2.x. (boolean value)
+#memcache_use_advanced_pool = false
+
+# (Optional) Indicate whether to set the X-Service-Catalog header. If False,
+# middleware will not ask for service catalog on token validation and will not
+# set the X-Service-Catalog header. (boolean value)
+#include_service_catalog = true
+
+# Used to control the use and type of token binding. Can be set to: "disabled"
+# to not check token binding. "permissive" (default) to validate binding
+# information if the bind type is of a form known to the server and ignore it
+# if not. "strict" like "permissive" but if the bind type is unknown the token
+# will be rejected. "required" any form of token binding is needed to be
+# allowed. Finally the name of a binding method that must be present in tokens.
+# (string value)
+#enforce_token_bind = permissive
+
+# If true, the revocation list will be checked for cached tokens. This requires
+# that PKI tokens are configured on the identity server. (boolean value)
+#check_revocations_for_cached = false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a single algorithm
+# or multiple. The algorithms are those supported by Python standard
+# hashlib.new(). The hashes will be tried in the order given, so put the
+# preferred one first for performance. The result of the first hash will be
+# stored in the cache. This will typically be set to multiple values only while
+# migrating from a less secure algorithm to a more secure one. Once all the old
+# tokens are expired this option should be set to a single value for better
+# performance. (list value)
+#hash_algorithms = md5
+
+# Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
+# (string value)
+#auth_admin_prefix =
+
+# Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
+# (string value)
+#auth_host = 127.0.0.1
+
+# Port of the admin Identity API endpoint. Deprecated, use identity_uri.
+# (integer value)
+#auth_port = 35357
+
+# Protocol of the admin Identity API endpoint. Deprecated, use identity_uri.
+# (string value)
+# Allowed values: http, https
+#auth_protocol = https
+
+# Complete admin Identity API endpoint. This should specify the unversioned
+# root endpoint e.g. https://localhost:35357/ (string value)
+#identity_uri = <None>
+
+# This option is deprecated and may be removed in a future release. Single
+# shared secret with the Keystone configuration used for bootstrapping a
+# Keystone installation, or otherwise bypassing the normal authentication
+# process. This option should not be used, use `admin_user` and
+# `admin_password` instead. (string value)
+#admin_token = <None>
+
+# Service username. (string value)
+#admin_user = <None>
+
+# Service user password. (string value)
+#admin_password = <None>
+
+# Service tenant name. (string value)
+#admin_tenant_name = admin
+
+# Authentication type to load (unknown value)
+# Deprecated group/name - [DEFAULT]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (unknown value)
+#auth_section = <None>
+
+
+[matchmaker_redis]
+
+#
+# From oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host = 127.0.0.1
+
+# Use this port to connect to redis host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#port = 6379
+
+# Password for Redis server (optional). (string value)
+#password =
+
+# List of Redis Sentinel hosts (fault tolerance mode) e.g.
+# [host:port, host1:port ... ] (list value)
+#sentinel_hosts =
+
+# Redis replica set name. (string value)
+#sentinel_group_name = oslo-messaging-zeromq
+
+# Time in ms to wait between connection attempts. (integer value)
+#wait_timeout = 500
+
+# Time in ms to wait before the transaction is killed. (integer value)
+#check_timeout = 20000
+
+# Timeout in ms on blocking socket operations (integer value)
+#socket_timeout = 1000
+
+
+[oslo_concurrency]
+
+#
+# From oslo.concurrency
+#
+
+# Enables or disables inter-process locks. (boolean value)
+# Deprecated group/name - [DEFAULT]/disable_process_locking
+#disable_process_locking = false
+
+# Directory to use for lock files.  For security, the specified directory
+# should only be writable by the user running the processes that need locking.
+# Defaults to environment variable OSLO_LOCK_PATH. If OSLO_LOCK_PATH is not set
+# in the environment, use the Python tempfile.gettempdir function to find a
+# suitable location. If external locks are used, a lock path must be set.
+# (string value)
+# Deprecated group/name - [DEFAULT]/lock_path
+#lock_path = /tmp
+
+
+[oslo_messaging_amqp]
+
+#
+# From oslo.messaging
+#
+
+# address prefix used when sending to a specific server (string value)
+# Deprecated group/name - [amqp1]/server_request_prefix
+#server_request_prefix = exclusive
+
+# address prefix used when broadcasting to all servers (string value)
+# Deprecated group/name - [amqp1]/broadcast_prefix
+#broadcast_prefix = broadcast
+
+# address prefix when sending to any server in group (string value)
+# Deprecated group/name - [amqp1]/group_request_prefix
+#group_request_prefix = unicast
+
+# Name for the AMQP container (string value)
+# Deprecated group/name - [amqp1]/container_name
+#container_name = <None>
+
+# Timeout for inactive connections (in seconds) (integer value)
+# Deprecated group/name - [amqp1]/idle_timeout
+#idle_timeout = 0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+# Deprecated group/name - [amqp1]/trace
+#trace = false
+
+# CA certificate PEM file to verify server certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_ca_file
+#ssl_ca_file =
+
+# Identifying certificate PEM file to present to clients (string value)
+# Deprecated group/name - [amqp1]/ssl_cert_file
+#ssl_cert_file =
+
+# Private key PEM file used to sign cert_file certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_key_file
+#ssl_key_file =
+
+# Password for decrypting ssl_key_file (if encrypted) (string value)
+# Deprecated group/name - [amqp1]/ssl_key_password
+#ssl_key_password = <None>
+
+# Accept clients using either SSL or plain TCP (boolean value)
+# Deprecated group/name - [amqp1]/allow_insecure_clients
+#allow_insecure_clients = false
+
+# Space separated list of acceptable SASL mechanisms (string value)
+# Deprecated group/name - [amqp1]/sasl_mechanisms
+#sasl_mechanisms =
+
+# Path to directory that contains the SASL configuration (string value)
+# Deprecated group/name - [amqp1]/sasl_config_dir
+#sasl_config_dir =
+
+# Name of configuration file (without .conf suffix) (string value)
+# Deprecated group/name - [amqp1]/sasl_config_name
+#sasl_config_name =
+
+# User name for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/username
+#username =
+
+# Password for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/password
+#password =
+
+
+[oslo_messaging_notifications]
+
+#
+# From oslo.messaging
+#
+
+# The driver(s) to handle sending notifications. Possible values are
+# messaging, messagingv2, routing, log, test, noop (multi valued)
+# Deprecated group/name - [DEFAULT]/notification_driver
+#driver =
+
+# A URL representing the messaging driver to use for notifications. If not set,
+# we fall back to the same configuration used for RPC. (string value)
+# Deprecated group/name - [DEFAULT]/notification_transport_url
+#transport_url = <None>
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+# Deprecated group/name - [DEFAULT]/notification_topics
+#topics = notifications
+
+
+[oslo_messaging_rabbit]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_durable_queues
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_auto_delete
+#amqp_auto_delete = false
+
+# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
+# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
+# distributions. (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_version
+#kombu_ssl_version =
+
+# SSL key file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
+#kombu_ssl_keyfile =
+
+# SSL cert file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
+#kombu_ssl_certfile =
+
+# SSL certification authority file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
+#kombu_ssl_ca_certs =
+
+# How long to wait before reconnecting in response to an AMQP consumer cancel
+# notification. (floating point value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
+#kombu_reconnect_delay = 1.0
+
+# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not
+# be used. This option may not be available in future versions. (string value)
+#kombu_compression = <None>
+
+# How long to wait for a missing client before abandoning sending it its replies.
+# This value should not be longer than rpc_response_timeout. (integer value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout
+#kombu_missing_consumer_retry_timeout = 60
+
+# Determines how the next RabbitMQ node is chosen in case the one we are
+# currently connected to becomes unavailable. Takes effect only if more than
+# one RabbitMQ node is provided in config. (string value)
+# Allowed values: round-robin, shuffle
+#kombu_failover_strategy = round-robin
+
+# The RabbitMQ broker address where a single node is used. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_host
+#rabbit_host = localhost
+
+# The RabbitMQ broker port where a single node is used. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# Deprecated group/name - [DEFAULT]/rabbit_port
+#rabbit_port = 5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+# Deprecated group/name - [DEFAULT]/rabbit_hosts
+#rabbit_hosts = $rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
+#rabbit_use_ssl = false
+
+# The RabbitMQ userid. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_userid
+#rabbit_userid = guest
+
+# The RabbitMQ password. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_password
+#rabbit_password = guest
+
+# The RabbitMQ login method. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_login_method
+#rabbit_login_method = AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
+#rabbit_virtual_host = /
+
+# How frequently to retry connecting with RabbitMQ. (integer value)
+#rabbit_retry_interval = 1
+
+# How long to backoff for between retries when connecting to RabbitMQ. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
+#rabbit_retry_backoff = 2
+
+# Maximum interval of RabbitMQ connection retries. Default is 30 seconds.
+# (integer value)
+#rabbit_interval_max = 30
+
+# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry
+# count). (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_max_retries
+#rabbit_max_retries = 0
+
+# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this
+# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring
+# is no longer controlled by the x-ha-policy argument when declaring a queue.
+# If you just want to make sure that all queues (except  those with auto-
+# generated names) are mirrored across all nodes, run: "rabbitmqctl set_policy
+# HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
+#rabbit_ha_queues = false
+
+# Positive integer representing duration in seconds for queue TTL (x-expires).
+# Queues which are unused for the duration of the TTL are automatically
+# deleted. The parameter affects only reply and fanout queues. (integer value)
+# Minimum value: 1
+#rabbit_transient_queues_ttl = 1800
+
+# Specifies the number of messages to prefetch. Setting to zero allows
+# unlimited messages. (integer value)
+#rabbit_qos_prefetch_count = 0
+
+# Number of seconds after which the Rabbit broker is considered down if
+# heartbeat's keep-alive fails (0 disable the heartbeat). EXPERIMENTAL (integer
+# value)
+#heartbeat_timeout_threshold = 60
+
+# How many times during the heartbeat_timeout_threshold we check the
+# heartbeat. (integer value)
+#heartbeat_rate = 2
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
+# Deprecated group/name - [DEFAULT]/fake_rabbit
+#fake_rabbit = false
+
+# Maximum number of channels to allow (integer value)
+#channel_max = <None>
+
+# The maximum byte size for an AMQP frame (integer value)
+#frame_max = <None>
+
+# How often to send heartbeats for consumer's connections (integer value)
+#heartbeat_interval = 1
+
+# Enable SSL (boolean value)
+#ssl = <None>
+
+# Arguments passed to ssl.wrap_socket (dict value)
+#ssl_options = <None>
+
+# Set socket timeout in seconds for connection's socket (floating point value)
+#socket_timeout = 0.25
+
+# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating point
+# value)
+#tcp_user_timeout = 0.25
+
+# Set delay for reconnection to some host which has connection error (floating
+# point value)
+#host_connection_reconnect_delay = 0.25
+
+# Maximum number of connections to keep queued. (integer value)
+#pool_max_size = 10
+
+# Maximum number of connections to create above `pool_max_size`. (integer
+# value)
+#pool_max_overflow = 0
+
+# Default number of seconds to wait for a connection to become available (integer
+# value)
+#pool_timeout = 30
+
+# Lifetime of a connection (since creation) in seconds or None for no
+# recycling. Expired connections are closed on acquire. (integer value)
+#pool_recycle = 600
+
+# Threshold at which inactive (since release) connections are considered stale
+# in seconds or None for no staleness. Stale connections are closed on acquire.
+# (integer value)
+#pool_stale = 60
+
+# Persist notification messages. (boolean value)
+#notification_persistence = false
+
+# Exchange name for sending notifications (string value)
+#default_notification_exchange = ${control_exchange}_notification
+
+# Max number of not acknowledged message which RabbitMQ can send to
+# notification listener. (integer value)
+#notification_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during sending
+# notification, -1 means infinite retry. (integer value)
+#default_notification_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending
+# notification message (floating point value)
+#notification_retry_delay = 0.25
+
+# Time to live for rpc queues without consumers in seconds. (integer value)
+#rpc_queue_expiration = 60
+
+# Exchange name for sending RPC messages (string value)
+#default_rpc_exchange = ${control_exchange}_rpc
+
+# Exchange name for receiving RPC replies (string value)
+#rpc_reply_exchange = ${control_exchange}_rpc_reply
+
+# Max number of not acknowledged message which RabbitMQ can send to rpc
+# listener. (integer value)
+#rpc_listener_prefetch_count = 100
+
+# Max number of not acknowledged message which RabbitMQ can send to rpc reply
+# listener. (integer value)
+#rpc_reply_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during sending
+# reply. -1 means infinite retry during rpc_timeout (integer value)
+#rpc_reply_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending
+# reply. (floating point value)
+#rpc_reply_retry_delay = 0.25
+
+# Reconnecting retry count in case of connectivity problem during sending RPC
+# message, -1 means infinite retry. If the actual retry attempt count is not 0, the RPC
+# request could be processed more than one time (integer value)
+#default_rpc_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending RPC
+# message (floating point value)
+#rpc_retry_delay = 0.25
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# The JSON file that defines policies. (string value)
+# Deprecated group/name - [DEFAULT]/policy_file
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string value)
+# Deprecated group/name - [DEFAULT]/policy_default_rule
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be relative
+# to any directory in the search path defined by the config_dir option, or
+# absolute paths. The file defined by policy_file must exist for these
+# directories to be searched.  Missing or empty directories are ignored. (multi
+# valued)
+# Deprecated group/name - [DEFAULT]/policy_dirs
+#policy_dirs = policy.d
+
+
+[service_credentials]
+auth_type = password
+auth_url = http://localhost:5000/v3
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = ceilometer
+password = password
+interface = internalURL
+region_name = RegionOne
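The [keystone_authtoken] and [service_credentials] sections above hold the credentials ceilometer-api uses to validate incoming tokens and that the agents use to reach other OpenStack services. One way to confirm the rendered configuration works end to end is to query the API with the same credentials once the services are up; a sketch in the same Ansible style (not part of the diff, and it assumes the ceilometer/password/service values set in this template):

  - name: Smoke-test the Ceilometer API with the service credentials
    shell: >
      ceilometer --os-auth-url http://localhost:5000/v3
      --os-username ceilometer --os-password password
      --os-project-name service
      --os-user-domain-name default --os-project-domain-name default
      meter-list
    changed_when: false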
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/keystone.conf.j2 b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/keystone.conf.j2
new file mode 100644
index 0000000..fe6d4fc
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/keystone.conf.j2
@@ -0,0 +1,2100 @@
+[DEFAULT]
+
+#
+# From keystone
+#
+
+# A "shared secret" that can be used to bootstrap Keystone. This "token" does
+# not represent a user, and carries no explicit authorization. If set to
+# `None`, the value is ignored and the `admin_token` log in mechanism is
+# effectively disabled. To completely disable `admin_token` in production
+# (highly recommended), remove AdminTokenAuthMiddleware from your paste
+# application pipelines (for example, in keystone-paste.ini). (string value)
+#admin_token = <None>
+admin_token = ADMIN_TOKEN
+
+# The base public endpoint URL for Keystone that is advertised to clients
+# (NOTE: this does NOT affect how Keystone listens for connections). Defaults
+# to the base host URL of the request. E.g. a request to
+# http://server:5000/v3/users will default to http://server:5000. You should
+# only need to set this value if the base URL contains a path (e.g. /prefix/v3)
+# or the endpoint should be found on a different server. (string value)
+#public_endpoint = <None>
+
+# The base admin endpoint URL for Keystone that is advertised to clients (NOTE:
+# this does NOT affect how Keystone listens for connections). Defaults to the
+# base host URL of the request. E.g. a request to http://server:35357/v3/users
+# will default to http://server:35357. You should only need to set this value
+# if the base URL contains a path (e.g. /prefix/v3) or the endpoint should be
+# found on a different server. (string value)
+#admin_endpoint = <None>
+
+# Maximum depth of the project hierarchy, excluding the project acting as a
+# domain at the top of the hierarchy. WARNING: setting it to a large value may
+# adversely impact  performance. (integer value)
+#max_project_tree_depth = 5
+
+# Limit the sizes of user & project ID/names. (integer value)
+#max_param_size = 64
+
+# Similar to max_param_size, but provides an exception for token values.
+# (integer value)
+#max_token_size = 8192
+
+# Similar to the member_role_name option, this represents the default role ID
+# used to associate users with their default projects in the v2 API. This will
+# be used as the explicit role where one is not specified by the v2 API.
+# (string value)
+#member_role_id = 9fe2ff9ee4384b1894a90878d3e92bab
+
+# This is the role name used in combination with the member_role_id option; see
+# that option for more detail. (string value)
+#member_role_name = _member_
+
+# The value passed as the keyword "rounds" to passlib's encrypt method.
+# (integer value)
+# Minimum value: 1000
+# Maximum value: 100000
+#crypt_strength = 10000
+
+# The maximum number of entities that will be returned in a collection, with no
+# limit set by default. This global limit may be then overridden for a specific
+# driver, by specifying a list_limit in the appropriate section (e.g.
+# [assignment]). (integer value)
+#list_limit = <None>
+
+# Set this to false if you want to enable the ability for user, group and
+# project entities to be moved between domains by updating their domain_id.
+# Allowing such movement is not recommended if the scope of a domain admin is
+# being restricted by use of an appropriate policy file (see
+# policy.v3cloudsample as an example). This ability is deprecated and will be
+# removed in a future release. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#domain_id_immutable = true
+
+# If set to true, strict password length checking is performed for password
+# manipulation. If a password exceeds the maximum length, the operation will
+# fail with an HTTP 403 Forbidden error. If set to false, passwords are
+# automatically truncated to the maximum length. (boolean value)
+#strict_password_check = false
+
+# The HTTP header used to determine the scheme for the original request, even
+# if it was removed by an SSL terminating proxy. (string value)
+#secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO
+
+# If set to true the server will return information in the response that may
+# allow an unauthenticated or authenticated user to get more information than
+# normal, such as why authentication failed. This may be useful for debugging
+# but is insecure. (boolean value)
+#insecure_debug = false
+
+#
+# From keystone.notifications
+#
+
+# Default publisher_id for outgoing notifications (string value)
+#default_publisher_id = <None>
+
+# Define the notification format for Identity Service events. A "basic"
+# notification has information about the resource being operated on. A "cadf"
+# notification has the same information, as well as information about the
+# initiator of the event. (string value)
+# Allowed values: basic, cadf
+#notification_format = basic
+
+# Define the notification options to opt-out from. The value expected is:
+# identity.<resource_type>.<operation>. This field can be set multiple times in
+# order to add more notifications to opt-out from. For example:
+#  notification_opt_out=identity.user.created
+#  notification_opt_out=identity.authenticate.success (multi valued)
+#notification_opt_out =
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+log_dir = /var/log/keystone
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+#
+# From oslo.messaging
+#
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+#rpc_conn_pool_size = 30
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address. (string value)
+#rpc_zmq_bind_address = *
+
+# MatchMaker driver. (string value)
+# Allowed values: redis, dummy
+#rpc_zmq_matchmaker = redis
+
+# Type of concurrency used. Either "native" or "eventlet" (string value)
+#rpc_zmq_concurrency = eventlet
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts = 1
+
+# Maximum number of ingress messages to locally buffer per topic. Default is
+# unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir = /var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match
+# "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
+
+# Seconds to wait before a cast expires (TTL). The default value of -1
+# specifies an infinite linger period. The value of 0 specifies no linger
+# period. Pending messages shall be discarded immediately when the socket is
+# closed. Only supported by impl_zmq. (integer value)
+#rpc_cast_timeout = -1
+
+# The default number of seconds that poll should wait. Poll raises timeout
+# exception when timeout expired. (integer value)
+#rpc_poll_timeout = 1
+
+# Expiration timeout in seconds of a name service record about existing target
+# ( < 0 means no timeout). (integer value)
+#zmq_target_expire = 120
+
+# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean
+# value)
+#use_pub_sub = true
+
+# Minimal port number for random ports range. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#rpc_zmq_min_port = 49152
+
+# Maximal port number for random ports range. (integer value)
+# Minimum value: 1
+# Maximum value: 65536
+#rpc_zmq_max_port = 65536
+
+# Number of retries to find free port number before fail with ZMQBindError.
+# (integer value)
+#rpc_zmq_bind_port_retries = 100
+
+# Size of executor thread pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
+#executor_thread_pool_size = 64
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout = 60
+
+# A URL representing the messaging driver to use and its full configuration. If
+# not set, we fall back to the rpc_backend option and driver specific
+# configuration. (string value)
+#transport_url = <None>
+
+# The messaging driver to use, defaults to rabbit. Other drivers include amqp
+# and zmq. (string value)
+#rpc_backend = rabbit
+
+# The default exchange under which topics are scoped. May be overridden by an
+# exchange name specified in the transport_url option. (string value)
+#control_exchange = keystone
+
+#
+# From oslo.service.service
+#
+
+# Enable eventlet backdoor.  Acceptable values are 0, <port>, and
+# <start>:<end>, where 0 results in listening on a random tcp port number;
+# <port> results in listening on the specified port number (and not enabling
+# backdoor if that port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range of port numbers.
+# The chosen port is displayed in the service's log file. (string value)
+#backdoor_port = <None>
+
+# Enable eventlet backdoor, using the provided path as a unix socket that can
+# receive connections. This option is mutually exclusive with 'backdoor_port'
+# in that only one should be provided. If both are provided then the existence
+# of this option overrides the usage of that option. (string value)
+#backdoor_socket = <None>
+
+# Enables or disables logging values of all registered options when starting a
+# service (at DEBUG level). (boolean value)
+#log_options = true
+
+# Specify a timeout after which a gracefully shutdown server will exit. Zero
+# value means endless wait. (integer value)
+#graceful_shutdown_timeout = 60
+
+
+[assignment]
+
+#
+# From keystone
+#
+
+# Entrypoint for the assignment backend driver in the keystone.assignment
+# namespace. Only an SQL driver is supplied. If an assignment driver is not
+# specified, the identity driver will choose the assignment driver (driver
+# selection based on `[identity]/driver` option is deprecated and will be
+# removed in the "O" release). (string value)
+#driver = <None>
+
+# A list of role names which are prohibited from being an implied role. (list
+# value)
+#prohibited_implied_role = admin
+
+
+[auth]
+
+#
+# From keystone
+#
+
+# Allowed authentication methods. (list value)
+#methods = external,password,token,oauth1
+
+# Entrypoint for the password auth plugin module in the keystone.auth.password
+# namespace. (string value)
+#password = <None>
+
+# Entrypoint for the token auth plugin module in the keystone.auth.token
+# namespace. (string value)
+#token = <None>
+
+# Entrypoint for the external (REMOTE_USER) auth plugin module in the
+# keystone.auth.external namespace. Supplied drivers are DefaultDomain and
+# Domain. The default driver is DefaultDomain. (string value)
+#external = <None>
+
+# Entrypoint for the oAuth1.0 auth plugin module in the keystone.auth.oauth1
+# namespace. (string value)
+#oauth1 = <None>
+
+
+[cache]
+
+#
+# From oslo.cache
+#
+
+# Prefix for building the configuration dictionary for the cache region. This
+# should not need to be changed unless there is another dogpile.cache region
+# with the same configuration name. (string value)
+#config_prefix = cache.oslo
+
+# Default TTL, in seconds, for any cached item in the dogpile.cache region.
+# This applies to any cached method that doesn't have an explicit cache
+# expiration time defined for it. (integer value)
+#expiration_time = 600
+
+# Dogpile.cache backend module. It is recommended that Memcache with pooling
+# (oslo_cache.memcache_pool) or Redis (dogpile.cache.redis) be used in
+# production deployments.  Small workloads (single process) like devstack can
+# use the dogpile.cache.memory backend. (string value)
+#backend = dogpile.cache.null
+
+# Arguments supplied to the backend module. Specify this option once per
+# argument to be passed to the dogpile.cache backend. Example format:
+# "<argname>:<value>". (multi valued)
+#backend_argument =
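+# Example (for illustration only; not a generated default). With a memcache
+# based backend the server URL could be passed as a backend argument; the
+# address below is a placeholder:
+#backend_argument = url:127.0.0.1:11211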
+
+# Proxy classes to import that will affect the way the dogpile.cache backend
+# functions. See the dogpile.cache documentation on changing-backend-behavior.
+# (list value)
+#proxies =
+
+# Global toggle for caching. (boolean value)
+#enabled = false
+
+# Extra debugging from the cache backend (cache keys, get/set/delete/etc
+# calls). This is only really useful if you need to see the specific cache-
+# backend get/set/delete calls with the keys/values.  Typically this should be
+# left set to false. (boolean value)
+#debug_cache_backend = false
+
+# Memcache servers in the format of "host:port". (dogpile.cache.memcache and
+# oslo_cache.memcache_pool backends only). (list value)
+#memcache_servers = localhost:11211
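+# Example with multiple servers (illustrative placeholder addresses only):
+#memcache_servers = 192.0.2.10:11211,192.0.2.11:11211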
+
+# Number of seconds memcached server is considered dead before it is tried
+# again. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
+# (integer value)
+#memcache_dead_retry = 300
+
+# Timeout in seconds for every call to a server. (dogpile.cache.memcache and
+# oslo_cache.memcache_pool backends only). (integer value)
+#memcache_socket_timeout = 3
+
+# Max total number of open connections to every memcached server.
+# (oslo_cache.memcache_pool backend only). (integer value)
+#memcache_pool_maxsize = 10
+
+# Number of seconds a connection to memcached is held unused in the pool before
+# it is closed. (oslo_cache.memcache_pool backend only). (integer value)
+#memcache_pool_unused_timeout = 60
+
+# Number of seconds that an operation will wait to get a memcache client
+# connection. (integer value)
+#memcache_pool_connection_get_timeout = 10
+
+
+[catalog]
+
+#
+# From keystone
+#
+
+# Catalog template file name for use with the template catalog backend. (string
+# value)
+#template_file = default_catalog.templates
+
+# Entrypoint for the catalog backend driver in the keystone.catalog namespace.
+# Supplied drivers are kvs, sql, templated, and endpoint_filter.sql (string
+# value)
+#driver = sql
+
+# Toggle for catalog caching. This has no effect unless global caching is
+# enabled. (boolean value)
+#caching = true
+
+# Time to cache catalog data (in seconds). This has no effect unless global and
+# catalog caching are enabled. (integer value)
+#cache_time = <None>
+
+# Maximum number of entities that will be returned in a catalog collection.
+# (integer value)
+#list_limit = <None>
+
+
+[cors]
+
+#
+# From oslo.middleware
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# requests "origin" header. (list value)
+#allowed_origin = <None>
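+# Example (illustrative placeholder only): allow a dashboard origin.
+#allowed_origin = https://horizon.example.com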
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Project-Id,X-Project-Name,X-Project-Domain-Id,X-Project-Domain-Name,X-Domain-Id,X-Domain-Name
+
+
+[cors.subdomain]
+
+#
+# From oslo.middleware
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# requests "origin" header. (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Project-Id,X-Project-Name,X-Project-Domain-Id,X-Project-Domain-Name,X-Domain-Id,X-Domain-Name
+
+
+[credential]
+
+#
+# From keystone
+#
+
+# Entrypoint for the credential backend driver in the keystone.credential
+# namespace. (string value)
+#driver = sql
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+# Deprecated group/name - [DEFAULT]/sqlite_db
+#sqlite_db = oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database. (string
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+#connection = sqlite:////var/lib/keystone/keystone.db
+
+connection = mysql+pymysql://keystone:password@localhost/keystone
+
+# The SQLAlchemy connection string to use to connect to the slave database.
+# (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including the
+# default, overrides any server-set SQL mode. To use whatever SQL mode is set
+# by the server configuration, set this to no value. Example: mysql_sql_mode=
+# (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum number of database connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection lost.
+# (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database operation up to
+# db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries of a
+# database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before error is
+# raised. Set to -1 to specify an infinite retry count. (integer value)
+#db_max_retries = 20
+
+
+[domain_config]
+
+#
+# From keystone
+#
+
+# Entrypoint for the domain config backend driver in the
+# keystone.resource.domain_config namespace. (string value)
+#driver = sql
+
+# Toggle for domain config caching. This has no effect unless global caching is
+# enabled. (boolean value)
+#caching = true
+
+# TTL (in seconds) to cache domain config data. This has no effect unless
+# domain config caching is enabled. (integer value)
+#cache_time = 300
+
+
+[endpoint_filter]
+
+#
+# From keystone
+#
+
+# Entrypoint for the endpoint filter backend driver in the
+# keystone.endpoint_filter namespace. (string value)
+#driver = sql
+
+# Toggle to return all active endpoints if no filter exists. (boolean value)
+#return_all_endpoints_if_no_filter = true
+
+
+[endpoint_policy]
+
+#
+# From keystone
+#
+
+# Enable endpoint_policy functionality. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: The option to enable the OS-ENDPOINT-POLICY extension has been
+# deprecated in the M release and will be removed in the O release. The OS-
+# ENDPOINT-POLICY extension will be enabled by default.
+#enabled = true
+
+# Entrypoint for the endpoint policy backend driver in the
+# keystone.endpoint_policy namespace. (string value)
+#driver = sql
+
+
+[eventlet_server]
+
+#
+# From keystone
+#
+
+# The number of worker processes to serve the public eventlet application.
+# Defaults to number of CPUs (minimum of 2). (integer value)
+# Deprecated group/name - [DEFAULT]/public_workers
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#public_workers = <None>
+
+# The number of worker processes to serve the admin eventlet application.
+# Defaults to number of CPUs (minimum of 2). (integer value)
+# Deprecated group/name - [DEFAULT]/admin_workers
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#admin_workers = <None>
+
+# The IP address of the network interface for the public service to listen on.
+# (string value)
+# Deprecated group/name - [DEFAULT]/bind_host
+# Deprecated group/name - [DEFAULT]/public_bind_host
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#public_bind_host = 0.0.0.0
+
+# The port number which the public service listens on. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# Deprecated group/name - [DEFAULT]/public_port
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#public_port = 5000
+
+# The IP address of the network interface for the admin service to listen on.
+# (string value)
+# Deprecated group/name - [DEFAULT]/bind_host
+# Deprecated group/name - [DEFAULT]/admin_bind_host
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#admin_bind_host = 0.0.0.0
+
+# The port number which the admin service listens on. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# Deprecated group/name - [DEFAULT]/admin_port
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#admin_port = 35357
+
+# If set to false, disables keepalives on the server; all connections will be
+# closed after serving one request. (boolean value)
+#wsgi_keep_alive = true
+
+# Timeout for socket operations on a client connection. If an incoming
+# connection is idle for this number of seconds it will be closed. A value of
+# "0" means wait forever. (integer value)
+#client_socket_timeout = 900
+
+# Set this to true if you want to enable TCP_KEEPALIVE on server sockets, i.e.
+# sockets used by the Keystone wsgi server for client connections. (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/tcp_keepalive
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#tcp_keepalive = false
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server socket. Only
+# applies if tcp_keepalive is true. Ignored if system does not support it.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/tcp_keepidle
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#tcp_keepidle = 600
+
+
+[eventlet_server_ssl]
+
+#
+# From keystone
+#
+
+# Toggle for SSL support on the Keystone eventlet servers. (boolean value)
+# Deprecated group/name - [ssl]/enable
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#enable = false
+
+# Path of the certfile for SSL. For non-production environments, you may be
+# interested in using `keystone-manage ssl_setup` to generate self-signed
+# certificates. (string value)
+# Deprecated group/name - [ssl]/certfile
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#certfile = /etc/keystone/ssl/certs/keystone.pem
+
+# Path of the keyfile for SSL. (string value)
+# Deprecated group/name - [ssl]/keyfile
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#keyfile = /etc/keystone/ssl/private/keystonekey.pem
+
+# Path of the CA cert file for SSL. (string value)
+# Deprecated group/name - [ssl]/ca_certs
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#ca_certs = /etc/keystone/ssl/certs/ca.pem
+
+# Require client certificate. (boolean value)
+# Deprecated group/name - [ssl]/cert_required
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#cert_required = false
+
+
+[federation]
+
+#
+# From keystone
+#
+
+# Entrypoint for the federation backend driver in the keystone.federation
+# namespace. (string value)
+#driver = sql
+
+# Value to be used when filtering assertion parameters from the environment.
+# (string value)
+#assertion_prefix =
+
+# Value to be used to obtain the entity ID of the Identity Provider from the
+# environment (e.g. if using the mod_shib plugin this value is `Shib-Identity-
+# Provider`). (string value)
+#remote_id_attribute = <None>
+
+# A domain name that is reserved to allow federated ephemeral users to have a
+# domain concept. Note that an admin will not be able to create a domain with
+# this name or update an existing domain to this name. You are not advised to
+# change this value unless you really have to. (string value)
+#federated_domain_name = Federated
+
+# A list of trusted dashboard hosts. Before accepting a Single Sign-On request
+# to return a token, the origin host must be a member of the trusted_dashboard
+# list. This configuration option may be repeated for multiple values. For
+# example: trusted_dashboard=http://acme.com/auth/websso
+# trusted_dashboard=http://beta.com/auth/websso (multi valued)
+#trusted_dashboard =
+
+# Location of Single Sign-On callback handler, will return a token to a trusted
+# dashboard host. (string value)
+#sso_callback_template = /etc/keystone/sso_callback_template.html
+
+
+[fernet_tokens]
+
+#
+# From keystone
+#
+
+# Directory containing Fernet token keys. (string value)
+#key_repository = /etc/keystone/fernet-keys/
+
+# This controls how many keys are held in rotation by keystone-manage
+# fernet_rotate before they are discarded. The default value of 3 means that
+# keystone will maintain one staged key, one primary key, and one secondary
+# key. Increasing this value means that additional secondary keys will be kept
+# in the rotation. (integer value)
+#max_active_keys = 3
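+# Keys are rotated outside of keystone, for example (illustrative invocation;
+# adjust the user and group to your deployment):
+# keystone-manage fernet_rotate --keystone-user keystone --keystone-group keystone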
+
+
+[identity]
+
+#
+# From keystone
+#
+
+# This references the domain to use for all Identity API v2 requests (which are
+# not aware of domains). A domain with this ID will be created for you by
+# keystone-manage db_sync in migration 008. The domain referenced by this ID
+# cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API.
+# There is nothing special about this domain, other than the fact that it must
+# exist in order to maintain support for your v2 clients. (string value)
+#default_domain_id = default
+
+# A subset (or all) of domains can have their own identity driver, each with
+# their own partial configuration options, stored in either the resource
+# backend or in a file in a domain configuration directory (depending on the
+# setting of domain_configurations_from_database). Only values specific to the
+# domain need to be specified in this manner. This feature is disabled by
+# default; set to true to enable. (boolean value)
+#domain_specific_drivers_enabled = false
+
+# Extract the domain specific configuration options from the resource backend
+# where they have been stored with the domain data. This feature is disabled by
+# default (in which case the domain specific options will be loaded from files
+# in the domain configuration directory); set to true to enable. (boolean
+# value)
+#domain_configurations_from_database = false
+
+# Path for Keystone to locate the domain specific identity configuration files
+# if domain_specific_drivers_enabled is set to true. (string value)
+#domain_config_dir = /etc/keystone/domains
+
+# Entrypoint for the identity backend driver in the keystone.identity
+# namespace. Supplied drivers are ldap and sql. (string value)
+#driver = sql
+
+# Toggle for identity caching. This has no effect unless global caching is
+# enabled. (boolean value)
+#caching = true
+
+# Time to cache identity data (in seconds). This has no effect unless global
+# and identity caching are enabled. (integer value)
+#cache_time = 600
+
+# Maximum supported length for user passwords; decrease to improve performance.
+# (integer value)
+# Maximum value: 4096
+#max_password_length = 4096
+
+# Maximum number of entities that will be returned in an identity collection.
+# (integer value)
+#list_limit = <None>
+
+
+[identity_mapping]
+
+#
+# From keystone
+#
+
+# Entrypoint for the identity mapping backend driver in the
+# keystone.identity.id_mapping namespace. (string value)
+#driver = sql
+
+# Entrypoint for the public ID generator for user and group entities in the
+# keystone.identity.id_generator namespace. The Keystone identity mapper only
+# supports generators that produce no more than 64 characters. (string value)
+#generator = sha256
+
+# The format of user and group IDs changed in Juno for backends that do not
+# generate UUIDs (e.g. LDAP), with keystone providing a hash mapping to the
+# underlying attribute in LDAP. By default this mapping is disabled, which
+# ensures that existing IDs will not change. Even when the mapping is enabled
+# by using domain specific drivers, any users and groups from the default
+# domain being handled by LDAP will still not be mapped to ensure their IDs
+# remain backward compatible. Setting this value to False will enable the
+# mapping for even the default LDAP driver. It is only safe to do this if you
+# do not already have assignments for users and groups from the default LDAP
+# domain, and it is acceptable for Keystone to provide the different IDs to
+# clients than it did previously. Typically this means that the only time you
+# can set this value to False is when configuring a fresh installation.
+# (boolean value)
+#backward_compatible_ids = true
+
+
+[kvs]
+
+#
+# From keystone
+#
+
+# Extra dogpile.cache backend modules to register with the dogpile.cache
+# library. (list value)
+#backends =
+
+# Prefix for building the configuration dictionary for the KVS region. This
+# should not need to be changed unless there is another dogpile.cache region
+# with the same configuration name. (string value)
+#config_prefix = keystone.kvs
+
+# Toggle to disable using a key-mangling function to ensure fixed length keys.
+# This is toggle-able for debugging purposes, it is highly recommended to
+# always leave this set to true. (boolean value)
+#enable_key_mangler = true
+
+# Default lock timeout (in seconds) for distributed locking. (integer value)
+#default_lock_timeout = 5
+
+
+[ldap]
+
+#
+# From keystone
+#
+
+# URL(s) for connecting to the LDAP server. Multiple LDAP URLs may be specified
+# as a comma separated string. The first URL to successfully bind is used for
+# the connection. (string value)
+#url = ldap://localhost
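+# Example with multiple servers (illustrative placeholder hostnames only):
+#url = ldap://ldap1.example.com,ldap://ldap2.example.com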
+
+# User BindDN to query the LDAP server. (string value)
+#user = <None>
+
+# Password for the BindDN to query the LDAP server. (string value)
+#password = <None>
+
+# LDAP server suffix (string value)
+#suffix = cn=example,cn=com
+
+# If true, will add a dummy member to groups. This is required if the
+# objectclass for groups requires the "member" attribute. (boolean value)
+#use_dumb_member = false
+
+# DN of the "dummy member" to use when "use_dumb_member" is enabled. (string
+# value)
+#dumb_member = cn=dumb,dc=nonexistent
+
+# Delete subtrees using the subtree delete control. Only enable this option if
+# your LDAP server supports subtree deletion. (boolean value)
+#allow_subtree_delete = false
+
+# The LDAP scope for queries, "one" represents oneLevel/singleLevel and "sub"
+# represents subtree/wholeSubtree options. (string value)
+# Allowed values: one, sub
+#query_scope = one
+
+# Maximum results per page; a value of zero ("0") disables paging. (integer
+# value)
+#page_size = 0
+
+# The LDAP dereferencing option for queries. The "default" option falls back to
+# using default dereferencing configured by your ldap.conf. (string value)
+# Allowed values: never, searching, always, finding, default
+#alias_dereferencing = default
+
+# Sets the LDAP debugging level for LDAP calls. A value of 0 means that
+# debugging is not enabled. This value is a bitmask, consult your LDAP
+# documentation for possible values. (integer value)
+#debug_level = <None>
+
+# Override the system's default referral chasing behavior for queries. (boolean
+# value)
+#chase_referrals = <None>
+
+# Search base for users. Defaults to the suffix value. (string value)
+#user_tree_dn = <None>
+
+# LDAP search filter for users. (string value)
+#user_filter = <None>
+
+# LDAP objectclass for users. (string value)
+#user_objectclass = inetOrgPerson
+
+# LDAP attribute mapped to user id. WARNING: must not be a multivalued
+# attribute. (string value)
+#user_id_attribute = cn
+
+# LDAP attribute mapped to user name. (string value)
+#user_name_attribute = sn
+
+# LDAP attribute mapped to user description. (string value)
+#user_description_attribute = description
+
+# LDAP attribute mapped to user email. (string value)
+#user_mail_attribute = mail
+
+# LDAP attribute mapped to password. (string value)
+#user_pass_attribute = userPassword
+
+# LDAP attribute mapped to user enabled flag. (string value)
+#user_enabled_attribute = enabled
+
+# Invert the meaning of the boolean enabled values. Some LDAP servers use a
+# boolean lock attribute where "true" means an account is disabled. Setting
+# "user_enabled_invert = true" will allow these lock attributes to be used.
+# This setting will have no effect if "user_enabled_mask" or
+# "user_enabled_emulation" settings are in use. (boolean value)
+#user_enabled_invert = false
+
+# Bitmask integer to indicate the bit that the enabled value is stored in if
+# the LDAP server represents "enabled" as a bit on an integer rather than a
+# boolean. A value of "0" indicates the mask is not used. If this is not set to
+# "0" the typical value is "2". This is typically used when
+# "user_enabled_attribute = userAccountControl". (integer value)
+#user_enabled_mask = 0
+
+# Default value to enable users. This should match an appropriate int value if
+# the LDAP server uses non-boolean (bitmask) values to indicate if a user is
+# enabled or disabled. If this is not set to "True" the typical value is "512".
+# This is typically used when "user_enabled_attribute = userAccountControl".
+# (string value)
+#user_enabled_default = True
+
+# List of attributes stripped off the user on update. (list value)
+#user_attribute_ignore = default_project_id
+
+# LDAP attribute mapped to default_project_id for users. (string value)
+#user_default_project_id_attribute = <None>
+
+# Allow user creation in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
+#user_allow_create = true
+
+# Allow user updates in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
+#user_allow_update = true
+
+# Allow user deletion in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
+#user_allow_delete = true
+
+# If true, Keystone uses an alternative method to determine if a user is
+# enabled or not by checking if they are a member of the
+# "user_enabled_emulation_dn" group. (boolean value)
+#user_enabled_emulation = false
+
+# DN of the group entry to hold enabled users when using enabled emulation.
+# (string value)
+#user_enabled_emulation_dn = <None>
+
+# Use the "group_member_attribute" and "group_objectclass" settings to
+# determine membership in the emulated enabled group. (boolean value)
+#user_enabled_emulation_use_group_config = false
+
+# List of additional LDAP attributes used for mapping additional attribute
+# mappings for users. Attribute mapping format is <ldap_attr>:<user_attr>,
+# where ldap_attr is the attribute in the LDAP entry and user_attr is the
+# Identity API attribute. (list value)
+#user_additional_attribute_mapping =
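+# Example mapping (purely illustrative; attribute names are placeholders):
+#user_additional_attribute_mapping = description:name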
+
+# Search base for groups. Defaults to the suffix value. (string value)
+#group_tree_dn = <None>
+
+# LDAP search filter for groups. (string value)
+#group_filter = <None>
+
+# LDAP objectclass for groups. (string value)
+#group_objectclass = groupOfNames
+
+# LDAP attribute mapped to group id. (string value)
+#group_id_attribute = cn
+
+# LDAP attribute mapped to group name. (string value)
+#group_name_attribute = ou
+
+# LDAP attribute mapped to show group membership. (string value)
+#group_member_attribute = member
+
+# LDAP attribute mapped to group description. (string value)
+#group_desc_attribute = description
+
+# List of attributes stripped off the group on update. (list value)
+#group_attribute_ignore =
+
+# Allow group creation in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
+#group_allow_create = true
+
+# Allow group update in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
+#group_allow_update = true
+
+# Allow group deletion in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
+#group_allow_delete = true
+
+# Additional attribute mappings for groups. Attribute mapping format is
+# <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the LDAP entry
+# and user_attr is the Identity API attribute. (list value)
+#group_additional_attribute_mapping =
+
+# CA certificate file path for communicating with LDAP servers. (string value)
+#tls_cacertfile = <None>
+
+# CA certificate directory path for communicating with LDAP servers. (string
+# value)
+#tls_cacertdir = <None>
+
+# Enable TLS for communicating with LDAP servers. (boolean value)
+#use_tls = false
+
+# Specifies what checks to perform on client certificates in an incoming TLS
+# session. (string value)
+# Allowed values: demand, never, allow
+#tls_req_cert = demand
+
+# Enable LDAP connection pooling. (boolean value)
+#use_pool = true
+
+# Connection pool size. (integer value)
+#pool_size = 10
+
+# Maximum count of reconnect trials. (integer value)
+#pool_retry_max = 3
+
+# Time span in seconds to wait between two reconnect trials. (floating point
+# value)
+#pool_retry_delay = 0.1
+
+# Connector timeout in seconds. Value -1 indicates indefinite wait for
+# response. (integer value)
+#pool_connection_timeout = -1
+
+# Connection lifetime in seconds. (integer value)
+#pool_connection_lifetime = 600
+
+# Enable LDAP connection pooling for end user authentication. If use_pool is
+# disabled, then this setting is meaningless and is not used at all. (boolean
+# value)
+#use_auth_pool = true
+
+# End user auth connection pool size. (integer value)
+#auth_pool_size = 100
+
+# End user auth connection lifetime in seconds. (integer value)
+#auth_pool_connection_lifetime = 60
+
+# If the members of the group objectclass are user IDs rather than DNs, set
+# this to true. This is the case when using posixGroup as the group objectclass
+# and OpenDirectory. (boolean value)
+#group_members_are_ids = false
+
+
+[matchmaker_redis]
+
+#
+# From oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host = 127.0.0.1
+
+# Use this port to connect to redis host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#port = 6379
+
+# Password for Redis server (optional). (string value)
+#password =
+
+# List of Redis Sentinel hosts (fault tolerance mode) e.g.
+# [host:port, host1:port ... ] (list value)
+#sentinel_hosts =
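+# Example (illustrative placeholder addresses only):
+#sentinel_hosts = 192.0.2.20:26379,192.0.2.21:26379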
+
+# Redis replica set name. (string value)
+#sentinel_group_name = oslo-messaging-zeromq
+
+# Time in ms to wait between connection attempts. (integer value)
+#wait_timeout = 500
+
+# Time in ms to wait before the transaction is killed. (integer value)
+#check_timeout = 20000
+
+# Timeout in ms on blocking socket operations (integer value)
+#socket_timeout = 1000
+
+
+[memcache]
+
+#
+# From keystone
+#
+
+# Memcache servers in the format of "host:port". (list value)
+#servers = localhost:11211
+
+# Number of seconds memcached server is considered dead before it is tried
+# again. This is used by the key value store system (e.g. token pooled
+# memcached persistence backend). (integer value)
+#dead_retry = 300
+
+# Timeout in seconds for every call to a server. This is used by the key value
+# store system (e.g. token pooled memcached persistence backend). (integer
+# value)
+#socket_timeout = 3
+
+# Max total number of open connections to every memcached server. This is used
+# by the key value store system (e.g. token pooled memcached persistence
+# backend). (integer value)
+#pool_maxsize = 10
+
+# Number of seconds a connection to memcached is held unused in the pool before
+# it is closed. This is used by the key value store system (e.g. token pooled
+# memcached persistence backend). (integer value)
+#pool_unused_timeout = 60
+
+# Number of seconds that an operation will wait to get a memcache client
+# connection. This is used by the key value store system (e.g. token pooled
+# memcached persistence backend). (integer value)
+#pool_connection_get_timeout = 10
+
+
+[oauth1]
+
+#
+# From keystone
+#
+
+# Entrypoint for the OAuth backend driver in the keystone.oauth1 namespace.
+# (string value)
+#driver = sql
+
+# Duration (in seconds) for the OAuth Request Token. (integer value)
+#request_token_duration = 28800
+
+# Duration (in seconds) for the OAuth Access Token. (integer value)
+#access_token_duration = 86400
+
+
+[os_inherit]
+
+#
+# From keystone
+#
+
+# Role-assignment inheritance to projects from owning domain or from projects
+# higher in the hierarchy can be optionally disabled. In the future, this
+# option will be removed and the hierarchy will be always enabled. (boolean
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: The option to enable the OS-INHERIT extension has been deprecated in
+# the M release and will be removed in the O release. The OS-INHERIT extension
+# will be enabled by default.
+#enabled = true
+
+
+[oslo_messaging_amqp]
+
+#
+# From oslo.messaging
+#
+
+# address prefix used when sending to a specific server (string value)
+# Deprecated group/name - [amqp1]/server_request_prefix
+#server_request_prefix = exclusive
+
+# address prefix used when broadcasting to all servers (string value)
+# Deprecated group/name - [amqp1]/broadcast_prefix
+#broadcast_prefix = broadcast
+
+# address prefix when sending to any server in group (string value)
+# Deprecated group/name - [amqp1]/group_request_prefix
+#group_request_prefix = unicast
+
+# Name for the AMQP container (string value)
+# Deprecated group/name - [amqp1]/container_name
+#container_name = <None>
+
+# Timeout for inactive connections (in seconds) (integer value)
+# Deprecated group/name - [amqp1]/idle_timeout
+#idle_timeout = 0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+# Deprecated group/name - [amqp1]/trace
+#trace = false
+
+# CA certificate PEM file to verify server certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_ca_file
+#ssl_ca_file =
+
+# Identifying certificate PEM file to present to clients (string value)
+# Deprecated group/name - [amqp1]/ssl_cert_file
+#ssl_cert_file =
+
+# Private key PEM file used to sign cert_file certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_key_file
+#ssl_key_file =
+
+# Password for decrypting ssl_key_file (if encrypted) (string value)
+# Deprecated group/name - [amqp1]/ssl_key_password
+#ssl_key_password = <None>
+
+# Accept clients using either SSL or plain TCP (boolean value)
+# Deprecated group/name - [amqp1]/allow_insecure_clients
+#allow_insecure_clients = false
+
+# Space separated list of acceptable SASL mechanisms (string value)
+# Deprecated group/name - [amqp1]/sasl_mechanisms
+#sasl_mechanisms =
+
+# Path to directory that contains the SASL configuration (string value)
+# Deprecated group/name - [amqp1]/sasl_config_dir
+#sasl_config_dir =
+
+# Name of configuration file (without .conf suffix) (string value)
+# Deprecated group/name - [amqp1]/sasl_config_name
+#sasl_config_name =
+
+# User name for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/username
+#username =
+
+# Password for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/password
+#password =
+
+
+[oslo_messaging_notifications]
+
+#
+# From oslo.messaging
+#
+
+# The driver(s) to handle sending notifications. Possible values are
+# messaging, messagingv2, routing, log, test, noop (multi valued)
+# Deprecated group/name - [DEFAULT]/notification_driver
+#driver =
+
+# A URL representing the messaging driver to use for notifications. If not set,
+# we fall back to the same configuration used for RPC. (string value)
+# Deprecated group/name - [DEFAULT]/notification_transport_url
+#transport_url = <None>
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+# Deprecated group/name - [DEFAULT]/notification_topics
+#topics = notifications
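+# Example with multiple topics (illustrative only):
+#topics = notifications,monitoring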
+
+
+[oslo_messaging_rabbit]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_durable_queues
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_auto_delete
+#amqp_auto_delete = false
+
+# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
+# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
+# distributions. (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_version
+#kombu_ssl_version =
+
+# SSL key file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
+#kombu_ssl_keyfile =
+
+# SSL cert file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
+#kombu_ssl_certfile =
+
+# SSL certification authority file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
+#kombu_ssl_ca_certs =
+
+# How long to wait before reconnecting in response to an AMQP consumer cancel
+# notification. (floating point value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
+#kombu_reconnect_delay = 1.0
+
+# EXPERIMENTAL: Possible values are: gzip, bz2. If not set, compression will
+# not be used. This option may not be available in future versions. (string
+# value)
+#kombu_compression = <None>
+
+# How long to wait for a missing client before abandoning the attempt to send
+# it its replies. This value should not be longer than rpc_response_timeout.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout
+#kombu_missing_consumer_retry_timeout = 60
+
+# Determines how the next RabbitMQ node is chosen in case the one we are
+# currently connected to becomes unavailable. Takes effect only if more than
+# one RabbitMQ node is provided in config. (string value)
+# Allowed values: round-robin, shuffle
+#kombu_failover_strategy = round-robin
+
+# The RabbitMQ broker address where a single node is used. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_host
+#rabbit_host = localhost
+
+# The RabbitMQ broker port where a single node is used. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# Deprecated group/name - [DEFAULT]/rabbit_port
+#rabbit_port = 5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+# Deprecated group/name - [DEFAULT]/rabbit_hosts
+#rabbit_hosts = $rabbit_host:$rabbit_port
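+# Example for an HA cluster (illustrative placeholder hostnames only):
+#rabbit_hosts = rabbit1.example.com:5672,rabbit2.example.com:5672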
+
+# Connect over SSL for RabbitMQ. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
+#rabbit_use_ssl = false
+
+# The RabbitMQ userid. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_userid
+#rabbit_userid = guest
+
+# The RabbitMQ password. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_password
+#rabbit_password = guest
+
+# The RabbitMQ login method. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_login_method
+#rabbit_login_method = AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
+#rabbit_virtual_host = /
+
+# How frequently to retry connecting with RabbitMQ. (integer value)
+#rabbit_retry_interval = 1
+
+# How long to backoff for between retries when connecting to RabbitMQ. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
+#rabbit_retry_backoff = 2
+
+# Maximum interval of RabbitMQ connection retries. Default is 30 seconds.
+# (integer value)
+#rabbit_interval_max = 30
+
+# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry
+# count). (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_max_retries
+#rabbit_max_retries = 0
+
+# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this
+# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring
+# is no longer controlled by the x-ha-policy argument when declaring a queue.
+# If you just want to make sure that all queues (except those with auto-
+# generated names) are mirrored across all nodes, run: "rabbitmqctl set_policy
+# HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
+#rabbit_ha_queues = false
+
+# Positive integer representing duration in seconds for queue TTL (x-expires).
+# Queues which are unused for the duration of the TTL are automatically
+# deleted. The parameter affects only reply and fanout queues. (integer value)
+# Minimum value: 1
+#rabbit_transient_queues_ttl = 1800
+
+# Specifies the number of messages to prefetch. Setting to zero allows
+# unlimited messages. (integer value)
+#rabbit_qos_prefetch_count = 0
+
+# Number of seconds after which the Rabbit broker is considered down if
+# heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL
+# (integer value)
+#heartbeat_timeout_threshold = 60
+
+# How many times during the heartbeat_timeout_threshold we check the
+# heartbeat. (integer value)
+#heartbeat_rate = 2
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
+# Deprecated group/name - [DEFAULT]/fake_rabbit
+#fake_rabbit = false
+
+# Maximum number of channels to allow (integer value)
+#channel_max = <None>
+
+# The maximum byte size for an AMQP frame (integer value)
+#frame_max = <None>
+
+# How often to send heartbeats for consumer's connections (integer value)
+#heartbeat_interval = 1
+
+# Enable SSL (boolean value)
+#ssl = <None>
+
+# Arguments passed to ssl.wrap_socket (dict value)
+#ssl_options = <None>
+
+# Set socket timeout in seconds for connection's socket (floating point value)
+#socket_timeout = 0.25
+
+# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating point
+# value)
+#tcp_user_timeout = 0.25
+
+# Set the delay for reconnecting to a host which has a connection error.
+# (floating point value)
+#host_connection_reconnect_delay = 0.25
+
+# Maximum number of connections to keep queued. (integer value)
+#pool_max_size = 10
+
+# Maximum number of connections to create above `pool_max_size`. (integer
+# value)
+#pool_max_overflow = 0
+
+# Default number of seconds to wait for a connection to become available.
+# (integer value)
+#pool_timeout = 30
+
+# Lifetime of a connection (since creation) in seconds or None for no
+# recycling. Expired connections are closed on acquire. (integer value)
+#pool_recycle = 600
+
+# Threshold at which inactive (since release) connections are considered stale
+# in seconds or None for no staleness. Stale connections are closed on acquire.
+# (integer value)
+#pool_stale = 60
+
+# Persist notification messages. (boolean value)
+#notification_persistence = false
+
+# Exchange name for sending notifications (string value)
+#default_notification_exchange = ${control_exchange}_notification
+
+# Max number of unacknowledged messages which RabbitMQ can send to the
+# notification listener. (integer value)
+#notification_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during sending
+# notification, -1 means infinite retry. (integer value)
+#default_notification_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending
+# notification message (floating point value)
+#notification_retry_delay = 0.25
+
+# Time to live for rpc queues without consumers in seconds. (integer value)
+#rpc_queue_expiration = 60
+
+# Exchange name for sending RPC messages (string value)
+#default_rpc_exchange = ${control_exchange}_rpc
+
+# Exchange name for receiving RPC replies (string value)
+#rpc_reply_exchange = ${control_exchange}_rpc_reply
+
+# Max number of unacknowledged messages which RabbitMQ can send to the rpc
+# listener. (integer value)
+#rpc_listener_prefetch_count = 100
+
+# Max number of unacknowledged messages which RabbitMQ can send to the rpc
+# reply listener. (integer value)
+#rpc_reply_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during sending
+# reply. -1 means infinite retry during rpc_timeout (integer value)
+#rpc_reply_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending
+# reply. (floating point value)
+#rpc_reply_retry_delay = 0.25
+
+# Reconnecting retry count in case of connectivity problem during sending RPC
+# message, -1 means infinite retry. If the actual retry attempt count is not
+# 0, the rpc request could be processed more than one time (integer value)
+#default_rpc_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending RPC
+# message (floating point value)
+#rpc_retry_delay = 0.25
+
+
+[oslo_middleware]
+
+#
+# From oslo.middleware
+#
+
+# The maximum body size for each request, in bytes. (integer value)
+# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size
+# Deprecated group/name - [DEFAULT]/max_request_body_size
+#max_request_body_size = 114688
+
+# The HTTP Header that will be used to determine what the original request
+# protocol scheme was, even if it was hidden by an SSL termination proxy.
+# (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#secure_proxy_ssl_header = X-Forwarded-Proto
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# The JSON file that defines policies. (string value)
+# Deprecated group/name - [DEFAULT]/policy_file
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string value)
+# Deprecated group/name - [DEFAULT]/policy_default_rule
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be relative
+# to any directory in the search path defined by the config_dir option, or
+# absolute paths. The file defined by policy_file must exist for these
+# directories to be searched.  Missing or empty directories are ignored. (multi
+# valued)
+# Deprecated group/name - [DEFAULT]/policy_dirs
+#policy_dirs = policy.d
+
+
+[paste_deploy]
+
+#
+# From keystone
+#
+
+# Name of the paste configuration file that defines the available pipelines.
+# (string value)
+#config_file = keystone-paste.ini
+
+
+[policy]
+
+#
+# From keystone
+#
+
+# Entrypoint for the policy backend driver in the keystone.policy namespace.
+# Supplied drivers are rules and sql. (string value)
+#driver = sql
+
+# Maximum number of entities that will be returned in a policy collection.
+# (integer value)
+#list_limit = <None>
+
+
+[resource]
+
+#
+# From keystone
+#
+
+# Entrypoint for the resource backend driver in the keystone.resource
+# namespace. Only an SQL driver is supplied. If a resource driver is not
+# specified, the assignment driver will choose the resource driver. (string
+# value)
+#driver = <None>
+
+# Toggle for resource caching. This has no effect unless global caching is
+# enabled. (boolean value)
+# Deprecated group/name - [assignment]/caching
+#caching = true
+
+# TTL (in seconds) to cache resource data. This has no effect unless global
+# caching is enabled. (integer value)
+# Deprecated group/name - [assignment]/cache_time
+#cache_time = <None>
+
+# Maximum number of entities that will be returned in a resource collection.
+# (integer value)
+# Deprecated group/name - [assignment]/list_limit
+#list_limit = <None>
+
+# Name of the domain that owns the `admin_project_name`. Defaults to None.
+# (string value)
+#admin_project_domain_name = <None>
+
+# Special project for performing administrative operations on remote services.
+# Tokens scoped to this project will contain the key/value
+# `is_admin_project=true`. Defaults to None. (string value)
+#admin_project_name = <None>
+
+# Whether the names of projects are restricted from containing url reserved
+# characters. If set to new, attempts to create or update a project with a url
+# unsafe name will return an error. In addition, if set to strict, attempts to
+# scope a token using an unsafe project name will return an error. (string
+# value)
+# Allowed values: off, new, strict
+#project_name_url_safe = off
+
+# Whether the names of domains are restricted from containing url reserved
+# characters. If set to new, attempts to create or update a domain with a url
+# unsafe name will return an error. In addition, if set to strict, attempts to
+# scope a token using a domain name which is unsafe will return an error.
+# (string value)
+# Allowed values: off, new, strict
+#domain_name_url_safe = off
+
+
+[revoke]
+
+#
+# From keystone
+#
+
+# Entrypoint for an implementation of the backend for persisting revocation
+# events in the keystone.revoke namespace. Supplied drivers are kvs and sql.
+# (string value)
+#driver = sql
+
+# This value (calculated in seconds) is added to token expiration before a
+# revocation event may be removed from the backend. (integer value)
+#expiration_buffer = 1800
+
+# Toggle for revocation event caching. This has no effect unless global caching
+# is enabled. (boolean value)
+#caching = true
+
+# Time to cache the revocation list and the revocation events (in seconds).
+# This has no effect unless global and token caching are enabled. (integer
+# value)
+# Deprecated group/name - [token]/revocation_cache_time
+#cache_time = 3600
+
+
+[role]
+
+#
+# From keystone
+#
+
+# Entrypoint for the role backend driver in the keystone.role namespace.
+# Supplied drivers are ldap and sql. (string value)
+#driver = <None>
+
+# Toggle for role caching. This has no effect unless global caching is enabled.
+# (boolean value)
+#caching = true
+
+# TTL (in seconds) to cache role data. This has no effect unless global caching
+# is enabled. (integer value)
+#cache_time = <None>
+
+# Maximum number of entities that will be returned in a role collection.
+# (integer value)
+#list_limit = <None>
+
+
+[saml]
+
+#
+# From keystone
+#
+
+# Default TTL, in seconds, for any generated SAML assertion created by
+# Keystone. (integer value)
+#assertion_expiration_time = 3600
+
+# Binary to be called for XML signing. Install the appropriate package, specify
+# absolute path or adjust your PATH environment variable if the binary cannot
+# be found. (string value)
+#xmlsec1_binary = xmlsec1
+
+# Path of the certfile for SAML signing. For non-production environments, you
+# may be interested in using `keystone-manage pki_setup` to generate self-
+# signed certificates. Note, the path cannot contain a comma. (string value)
+#certfile = /etc/keystone/ssl/certs/signing_cert.pem
+
+# Path of the keyfile for SAML signing. Note, the path cannot contain a comma.
+# (string value)
+#keyfile = /etc/keystone/ssl/private/signing_key.pem
+
+# Entity ID value for unique Identity Provider identification. Usually FQDN is
+# set with a suffix. A value is required to generate IDP Metadata. For example:
+# https://keystone.example.com/v3/OS-FEDERATION/saml2/idp (string value)
+#idp_entity_id = <None>
+
+# Identity Provider Single-Sign-On service value, required in the Identity
+# Provider's metadata. A value is required to generate IDP Metadata. For
+# example: https://keystone.example.com/v3/OS-FEDERATION/saml2/sso (string
+# value)
+#idp_sso_endpoint = <None>
+
+# Language used by the organization. (string value)
+#idp_lang = en
+
+# Organization name the installation belongs to. (string value)
+#idp_organization_name = <None>
+
+# Organization name to be displayed. (string value)
+#idp_organization_display_name = <None>
+
+# URL of the organization. (string value)
+#idp_organization_url = <None>
+
+# Company of contact person. (string value)
+#idp_contact_company = <None>
+
+# Given name of contact person (string value)
+#idp_contact_name = <None>
+
+# Surname of contact person. (string value)
+#idp_contact_surname = <None>
+
+# Email address of contact person. (string value)
+#idp_contact_email = <None>
+
+# Telephone number of contact person. (string value)
+#idp_contact_telephone = <None>
+
+# The contact type describing the main point of contact for the identity
+# provider. (string value)
+# Allowed values: technical, support, administrative, billing, other
+#idp_contact_type = other
+
+# Path to the Identity Provider Metadata file. This file should be generated
+# with the keystone-manage saml_idp_metadata command. (string value)
+#idp_metadata_path = /etc/keystone/saml2_idp_metadata.xml
+
+# The prefix to use for the RelayState SAML attribute, used when generating ECP
+# wrapped assertions. (string value)
+#relay_state_prefix = ss:mem:
+
+
+[shadow_users]
+
+#
+# From keystone
+#
+
+# Entrypoint for the shadow users backend driver in the
+# keystone.identity.shadow_users namespace. (string value)
+#driver = sql
+
+
+[signing]
+
+#
+# From keystone
+#
+
+# Path of the certfile for token signing. For non-production environments, you
+# may be interested in using `keystone-manage pki_setup` to generate self-
+# signed certificates. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
+#certfile = /etc/keystone/ssl/certs/signing_cert.pem
+
+# Path of the keyfile for token signing. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
+#keyfile = /etc/keystone/ssl/private/signing_key.pem
+
+# Path of the CA for token signing. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
+#ca_certs = /etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA key for token signing. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
+#ca_key = /etc/keystone/ssl/private/cakey.pem
+
+# Key size (in bits) for token signing cert (auto generated certificate).
+# (integer value)
+# Minimum value: 1024
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
+#key_size = 2048
+
+# Days the token signing cert is valid for (auto generated certificate).
+# (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
+#valid_days = 3650
+
+# Certificate subject (auto generated certificate) for token signing. (string
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
+#cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com
+
+
+[ssl]
+
+#
+# From keystone
+#
+
+# Path of the CA key file for SSL. (string value)
+#ca_key = /etc/keystone/ssl/private/cakey.pem
+
+# SSL key length (in bits) (auto generated certificate). (integer value)
+# Minimum value: 1024
+#key_size = 1024
+
+# Days the certificate is valid for once signed (auto generated certificate).
+# (integer value)
+#valid_days = 3650
+
+# SSL certificate subject (auto generated certificate). (string value)
+#cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN=localhost
+
+
+[token]
+provider = fernet
+
+#
+# From keystone
+#
+
+# External auth mechanisms that should add bind information to token, e.g.,
+# kerberos,x509. (list value)
+#bind =
+
+# Enforcement policy on tokens presented to Keystone with bind information. One
+# of disabled, permissive, strict, required or a specifically required bind
+# mode, e.g., kerberos or x509 to require binding to that authentication.
+# (string value)
+#enforce_token_bind = permissive
+
+# Amount of time a token should remain valid (in seconds). (integer value)
+#expiration = 3600
+
+# Controls the token construction, validation, and revocation operations.
+# Entrypoint in the keystone.token.provider namespace. Core providers are
+# [fernet|pkiz|pki|uuid]. (string value)
+#provider = uuid
+
+# Entrypoint for the token persistence backend driver in the
+# keystone.token.persistence namespace. Supplied drivers are kvs, memcache,
+# memcache_pool, and sql. (string value)
+#driver = sql
+
+# Toggle for token system caching. This has no effect unless global caching is
+# enabled. (boolean value)
+#caching = true
+
+# Time to cache tokens (in seconds). This has no effect unless global and token
+# caching are enabled. (integer value)
+#cache_time = <None>
+
+# Revoke token by token identifier. Setting revoke_by_id to true enables
+# various forms of enumerating tokens, e.g. `list tokens for user`. These
+# enumerations are processed to determine the list of tokens to revoke. Only
+# disable if you are switching to using the Revoke extension with a backend
+# other than KVS, which stores events in memory. (boolean value)
+#revoke_by_id = true
+
+# Allow rescoping of scoped token. Setting allow_rescoped_scoped_token to false
+# prevents a user from exchanging a scoped token for any other token. (boolean
+# value)
+#allow_rescope_scoped_token = true
+
+# The hash algorithm to use for PKI tokens. This can be set to any algorithm
+# that hashlib supports. WARNING: Before changing this value, the auth_token
+# middleware must be configured with the hash_algorithms, otherwise token
+# revocation will not be processed correctly. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
+#hash_algorithm = md5
+
+# Add roles to token that are not explicitly added, but that are linked
+# implicitly to other roles. (boolean value)
+#infer_roles = true
+
+
+[tokenless_auth]
+
+#
+# From keystone
+#
+
+# The list of trusted issuers to further filter the certificates that are
+# allowed to participate in the X.509 tokenless authorization. If the option is
+# absent then no certificates will be allowed. The naming format for the
+# attributes of a Distinguished Name(DN) must be separated by a comma and
+# contain no spaces. This configuration option may be repeated for multiple
+# values. For example: trusted_issuer=CN=john,OU=keystone,O=openstack
+# trusted_issuer=CN=mary,OU=eng,O=abc (multi valued)
+#trusted_issuer =
+
+# The protocol name for the X.509 tokenless authorization along with the option
+# issuer_attribute below can look up its corresponding mapping. (string value)
+#protocol = x509
+
+# The issuer attribute that is served as an IdP ID for the X.509 tokenless
+# authorization along with the protocol to look up its corresponding mapping.
+# It is the environment variable in the WSGI environment that references to the
+# issuer of the client certificate. (string value)
+#issuer_attribute = SSL_CLIENT_I_DN
+
+
+[trust]
+
+#
+# From keystone
+#
+
+# Delegation and impersonation features can be optionally disabled. (boolean
+# value)
+#enabled = true
+
+# Enable redelegation feature. (boolean value)
+#allow_redelegation = false
+
+# Maximum depth of trust redelegation. (integer value)
+#max_redelegation_count = 3
+
+# Entrypoint for the trust backend driver in the keystone.trust namespace.
+# (string value)
+#driver = sql
+
+[extra_headers]
+Distribution = Ubuntu
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/keystone.override.j2 b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/keystone.override.j2
new file mode 100644
index 0000000..2905494
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/keystone.override.j2
@@ -0,0 +1 @@
+manual
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/mongodb.conf.j2 b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/mongodb.conf.j2
new file mode 100644
index 0000000..5feeba2
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/mongodb.conf.j2
@@ -0,0 +1,101 @@
+# mongodb.conf
+
+# Where to store the data.
+dbpath=/var/lib/mongodb
+
+#where to log
+logpath=/var/log/mongodb/mongodb.log
+
+logappend=true
+
+bind_ip = localhost
+#port = 27017
+
+# Enable journaling, http://www.mongodb.org/display/DOCS/Journaling
+journal=true
+smallfiles = true
+
+# Enables periodic logging of CPU utilization and I/O wait
+#cpu = true
+
+# Turn on/off security.  Off is currently the default
+#noauth = true
+#auth = true
+
+# Verbose logging output.
+#verbose = true
+
+# Inspect all client data for validity on receipt (useful for
+# developing drivers)
+#objcheck = true
+
+# Enable db quota management
+#quota = true
+
+# Set oplogging level where n is
+#   0=off (default)
+#   1=W
+#   2=R
+#   3=both
+#   7=W+some reads
+#oplog = 0
+
+# Diagnostic/debugging option
+#nocursors = true
+
+# Ignore query hints
+#nohints = true
+
+# Disable the HTTP interface (Defaults to localhost:27018).
+#nohttpinterface = true
+
+# Turns off server-side scripting.  This will result in greatly limited
+# functionality
+#noscripting = true
+
+# Turns off table scans.  Any query that would do a table scan fails.
+#notablescan = true
+
+# Disable data file preallocation.
+#noprealloc = true
+
+# Specify .ns file size for new databases.
+# nssize = <size>
+
+# Account token for Mongo monitoring server.
+#mms-token = <token>
+
+# Server name for Mongo monitoring server.
+#mms-name = <server-name>
+
+# Ping interval for Mongo monitoring server.
+#mms-interval = <seconds>
+
+# Replication Options
+
+# in replicated mongo databases, specify here whether this is a slave or master
+#slave = true
+#source = master.example.com
+# Slave only: specify a single database to replicate
+#only = master.example.com
+# or
+#master = true
+#source = slave.example.com
+
+# Address of a server to pair with.
+#pairwith = <server:port>
+# Address of arbiter server.
+#arbiter = <server:port>
+# Automatically resync if slave data is stale
+#autoresync
+# Custom size for replication operation log.
+#oplogSize = <MB>
+# Size limit for in-memory storage of op ids.
+#opIdMem = <bytes>
+
+# SSL options
+# Enable SSL on normal ports
+#sslOnNormalPorts = true
+# SSL Key file and password
+#sslPEMKeyFile = /etc/ssl/mongodb.pem
+#sslPEMKeyPassword = pass
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/openstack.cnf.j2 b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/openstack.cnf.j2
new file mode 100644
index 0000000..1aeec60
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/openstack.cnf.j2
@@ -0,0 +1,7 @@
+[mysqld] 
+bind-address = localhost 
+default-storage-engine = innodb 
+innodb_file_per_table 
+collation-server = utf8_general_ci 
+init-connect = 'SET NAMES utf8' 
+character-set-server = utf8
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/wsgi-keystone.conf.j2 b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/wsgi-keystone.conf.j2
new file mode 100644
index 0000000..e5698e2
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v2/templates/wsgi-keystone.conf.j2
@@ -0,0 +1,32 @@
+Listen 5000
+Listen 35357
+
+<VirtualHost *:5000>
+    WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
+    WSGIProcessGroup keystone-public
+    WSGIScriptAlias / /usr/bin/keystone-wsgi-public
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    ErrorLogFormat "%{cu}t %M"
+    ErrorLog /var/log/apache2/keystone.log
+    CustomLog /var/log/apache2/keystone_access.log combined
+
+    <Directory /usr/bin>
+        Require all granted
+    </Directory>
+</VirtualHost>
+
+<VirtualHost *:35357>
+    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
+    WSGIProcessGroup keystone-admin
+    WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    ErrorLogFormat "%{cu}t %M"
+    ErrorLog /var/log/apache2/keystone.log
+    CustomLog /var/log/apache2/keystone_access.log combined
+
+    <Directory /usr/bin>
+        Require all granted
+    </Directory>
+</VirtualHost>
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/README.md b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/README.md
new file mode 100644
index 0000000..7b5157f
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/README.md
@@ -0,0 +1,21 @@
+# ceilometer-ansible-mitaka
+
+1. Adding the ceilometer user to MongoDB via the Ansible mongodb module throws an error and still needs a proper fix.
+   As a workaround, the user is created with a script (see mongo_user.sh).
+2. The MongoDB connection documented for ceilometer.conf in Mitaka is broken:
+
+   [database]
+   ...
+   connection = mongodb://ceilometer:CEILOMETER_DBPASS@controller:27017/ceilometer
+
+   The connection above does not work, so it is replaced with the following lines:
+   metering_connection = mongodb://localhost:27017/ceilometer
+   event_connection = mongodb://localhost:27017/ceilometer
+
+3. # mongo --host controller --eval '
+   db = db.getSiblingDB("ceilometer");
+   db.createUser({user: "ceilometer",
+   pwd: "CEILOMETER_DBPASS",
+   roles: [ "readWrite", "dbAdmin" ]})'
+
+   The createUser command above has issues, so it is replaced with addUser (see point 1; done via the script).
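A minimal sketch for sanity-checking this workaround, assuming mongod is listening on localhost:27017 and the ceilometer/password credentials created by mongo_user.sh (the healthcheck collection name is only an illustration, not part of the playbook):

    # Hypothetical check: verify that the ceilometer DB user added by
    # mongo_user.sh can authenticate and write a document to its database.
    mongo --host localhost ceilometer --eval '
      db.auth("ceilometer", "password");
      db.healthcheck.insert({ok: 1});
      printjson(db.healthcheck.findOne())'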
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/admin-openrc.sh b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/admin-openrc.sh
new file mode 100644
index 0000000..4720f4c
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/admin-openrc.sh
@@ -0,0 +1,9 @@
+export OS_PROJECT_DOMAIN_NAME=default
+export OS_USER_DOMAIN_NAME=default
+export OS_PROJECT_NAME=admin
+export OS_USERNAME=admin
+export OS_PASSWORD=password
+export OS_AUTH_URL=http://localhost:35357/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_IMAGE_API_VERSION=2
+ceilometer meter-list
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/configure_users_mitaka.sh b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/configure_users_mitaka.sh
new file mode 100644
index 0000000..11c2da2
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/configure_users_mitaka.sh
@@ -0,0 +1,40 @@
+set -x
+export OS_TOKEN=ADMIN_TOKEN
+export OS_URL=http://localhost:35357/v3
+export OS_IDENTITY_API_VERSION=3
+#Deleting existing services, users, roles and projects:
+for i in $(openstack service list -f value -c ID); do openstack service delete $i; done
+for i in $(openstack user list -f value -c ID); do openstack user delete $i; done
+for i in $(openstack role list -f value -c ID); do openstack role delete $i; done
+for i in $(openstack project list -f value -c ID); do openstack project delete $i; done
+openstack service create --name keystone --description "OpenStack Identity" identity
+openstack endpoint create --region RegionOne identity public http://localhost:5000/v3
+openstack endpoint create --region RegionOne identity internal http://localhost:5000/v3
+openstack endpoint create --region RegionOne identity admin http://localhost:35357/v3
+
+openstack domain create --description "Default Domain" default
+
+openstack project create --domain default --description "Admin Project" admin
+
+openstack user create --domain default --password password admin
+
+openstack role create admin
+openstack role add --project admin --user admin admin
+
+openstack project create --domain default --description "Service Project" service
+openstack project create --domain default --description "Demo Project" demo
+
+openstack user create --domain default --password password demo
+openstack role create user
+openstack role add --project demo --user demo user
+
+openstack user create --domain default --password password ceilometer
+openstack role add --project service --user ceilometer admin
+
+openstack service create --name ceilometer --description "Telemetry" metering
+openstack endpoint create --region RegionOne   metering public http://localhost:8777
+openstack endpoint create --region RegionOne   metering internal http://localhost:8777
+openstack endpoint create --region RegionOne   metering admin http://localhost:8777
+
+openstack user list
+openstack service list
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/install_ansible.sh b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/install_ansible.sh
new file mode 100644
index 0000000..40dcdd0
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/install_ansible.sh
@@ -0,0 +1,7 @@
+sudo apt-get update
+sudo apt-get -y install software-properties-common git mosh tmux dnsutils python-netaddr
+sudo add-apt-repository -y ppa:ansible/ansible
+sudo apt-get update
+sudo apt-get install -y ansible
+[ -e ~/.ssh/id_rsa ] || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
+cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/mongo_user.sh b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/mongo_user.sh
new file mode 100644
index 0000000..54f632e
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/mongo_user.sh
@@ -0,0 +1,6 @@
+mongo --host localhost --eval '
+  db = db.getSiblingDB("ceilometer");
+  db.addUser({user: "ceilometer",
+  pwd: "password",
+  roles: [ "readWrite", "dbAdmin" ]})'
+
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/os_ceilometer_mitaka.yml b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/os_ceilometer_mitaka.yml
new file mode 100644
index 0000000..4ed2da2
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/os_ceilometer_mitaka.yml
@@ -0,0 +1,180 @@
+---
+- name: Install Standalone ceilometer
+  hosts: local
+  vars:
+    mysql_root_password: "password"
+  sudo: yes
+  tasks:
+  # Adding the cloud archive repo, then apt update and dist-upgrade
+  - name: install repository
+    apt: name=software-properties-common state=present
+  - name: Adding package to repository list
+    shell: add-apt-repository cloud-archive:mitaka
+  - name: apt update
+    apt: update_cache=yes
+  - name: apt dist-upgrade
+    apt: upgrade=dist
+  - name: installing openstack clients
+    apt: name=python-openstackclient state=present 
+  #Installing MySQL Service
+  - name: Install MySQL Service
+    apt: name={{ item }} state=installed update_cache=yes
+    with_items:
+      - mariadb-server
+      - python-pymysql
+      - python-mysqldb
+
+  - name: Copy cnf file 
+    template: src=openstack.cnf.j2 dest=/etc/mysql/conf.d/openstack.cnf owner=root group=root mode=0644 
+  - name: Start the MySQL service
+    service: name=mysql state=restarted enabled=true
+
+  # MySQL secure installation
+  # Note: Please comment out this section if this is not the first run of this playbook (fix me)
+  - name: delete anonymous MySQL server user for localhost
+    action: mysql_user user="" host="localhost" state="absent"
+  - name: delete anonymous MySQL server user for localhost
+    action: mysql_user user="" state="absent"
+  - name: remove the MySQL test database
+    action: mysql_db db=test state=absent
+
+  #Updating root permissions
+  - name: update mysql root password for all root accounts
+    sudo: yes
+    mysql_user: 
+      name: root 
+      host: "{{ item }}" 
+      login_user: root
+      #password: "{{ mysql_root_password }}"
+      password: "password"
+      login_password: "{{ mysql_root_password }}"
+      check_implicit_admin: yes
+      priv: "*.*:ALL,GRANT"
+    with_items:
+      - "{{ ansible_hostname }}"
+      - 127.0.0.1
+      - ::1
+      - localhost
+  #Installing rabbitmq service
+  - name: rabbitmq-server
+    apt: name=rabbitmq-server state=present
+  - name: Adding RabbitMQ user and setting permissions
+    shell : rabbitmqctl add_user openstack "password";rabbitmqctl set_permissions openstack ".*" ".*" ".*"
+
+  # Installing Keystone Service
+  - name: Creating keystone.override file 
+    template: src=keystone.override.j2 dest=/etc/init/keystone.override owner=root group=root mode=0644 
+
+  - name: Install Keystone
+    apt: name={{ item }} state=installed update_cache=yes
+    with_items:
+      - keystone 
+      - apache2 
+      - libapache2-mod-wsgi 	
+
+    # installing memcached
+  - name : installing memcached service
+    apt: name={{ item }}  state=present   
+    with_items:
+      - memcached
+      - python-memcache
+  - name: Restart the memcached service
+    service: name=memcached state=restarted enabled=true	  
+    #editing memcache conf file
+  - name: Adding new line
+    lineinfile: dest=/etc/memcached.conf line="-l 127.0.0.1"
+  - name: Restart memcached service
+    service: name=memcached state=restarted enabled=true  	
+  - name: Keystone create DB for service
+    mysql_db:
+      login_user: "root"
+      login_password: "password"
+      login_host: "localhost"
+      name: "keystone"
+      state: "present"
+  - name: Keystone grant access to the DB for the service
+    mysql_user:
+      login_user: "root"
+      login_password: "password"
+      login_host: "localhost"
+      name: "keystone"
+      password: "password"
+      host: "{{ item }}"
+      state: "present"
+      priv: "keystone.*:ALL"
+    with_items:
+       - "localhost"
+       - "%"
+ # Installing Keystone Service
+  - name: Creating keystone.override file 
+    template: src=keystone.override.j2 dest=/etc/init/keystone.override owner=root group=root mode=0644 
+
+  - name: Install Keystone
+    apt: name={{ item }} state=installed update_cache=yes
+    with_items:
+      - keystone 
+      - apache2 
+      - libapache2-mod-wsgi 
+  - name: Creating keystone.conf  
+    template: src=keystone.conf.j2 dest=/etc/keystone/keystone.conf owner=root group=root mode=0644
+  - name: Running sync database
+    shell: /bin/sh -c "keystone-manage db_sync" keystone
+  - name: initializing fernet keystone
+    shell: keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone  
+  # Configuring apache server
+  - name: Adding new line
+    lineinfile: dest=/etc/apache2/apache2.conf line="ServerName localhost"
+
+  - name: Creating wsgi-keystone.conf  
+    template: src=wsgi-keystone.conf.j2 dest=/etc/apache2/sites-available/wsgi-keystone.conf owner=root group=root mode=0644
+  - name : Configuring apache2
+    shell:  ln -s /etc/apache2/sites-available/wsgi-keystone.conf /etc/apache2/sites-enabled
+    ignore_errors: yes
+  - name: Restart apache2 service
+    service: name=apache2 state=restarted enabled=true	
+  - name : removing previous databases
+    shell: rm -f /var/lib/keystone/keystone.db
+  #Installing MongoDB server
+  - name: Install the Mongo db server
+    apt: name={{ item }} state=installed update_cache=yes
+    with_items:
+      - mongodb-server
+      - mongodb-clients
+      - python-pymongo
+  - name: Creating mongodb.conf
+    template: src=mongodb.conf.j2 dest=/etc/mongodb.conf owner=root group=root mode=0644
+  - name: Stopping mongodb service
+    service: name=mongodb state=stopped
+  - name: Removing mongodb files
+    shell: rm -rf /var/lib/mongodb/journal/prealloc.*
+  - name: starting mongodb service
+    service: name=mongodb state=started
+  - name : Configuring users
+    script: configure_users_mitaka.sh
+  - name: Adding ceilometer database
+    script: mongo_user.sh
+    #mongodb_user: database=ceilometer name=ceilometer password=password roles='readWrite,userAdmin' state=present	
+  #Installing Ceilometer Services
+  - name : Install Ceilometer services
+    apt: name={{ item }} state=installed update_cache=yes
+    with_items:
+      - ceilometer-api
+      - ceilometer-collector
+      - ceilometer-agent-central
+      - ceilometer-agent-notification
+      - python-ceilometerclient
+ 
+  - name: Creating Ceilometer.conf
+    template: src=ceilometer.conf.j2 dest=/etc/ceilometer/ceilometer.conf owner=root group=root mode=0644
+
+  - name: Restarting ceilometer-agent-central
+    service: name=ceilometer-agent-central state=restarted
+
+  - name: Restarting ceilometer-agent-notification
+    service: name=ceilometer-agent-notification state=restarted
+
+  - name: Restarting Ceilometer API
+    service: name=ceilometer-api state=restarted
+
+  - name: Restarting ceilometer-collector
+    service: name=ceilometer-collector state=restarted
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/apache2.conf.j2 b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/apache2.conf.j2
new file mode 100644
index 0000000..52c0eeb
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/apache2.conf.j2
@@ -0,0 +1,222 @@
+# This is the main Apache server configuration file.  It contains the
+# configuration directives that give the server its instructions.
+# See http://httpd.apache.org/docs/2.4/ for detailed information about
+# the directives and /usr/share/doc/apache2/README.Debian about Debian specific
+# hints.
+#
+#
+# Summary of how the Apache 2 configuration works in Debian:
+# The Apache 2 web server configuration in Debian is quite different to
+# upstream's suggested way to configure the web server. This is because Debian's
+# default Apache2 installation attempts to make adding and removing modules,
+# virtual hosts, and extra configuration directives as flexible as possible, in
+# order to make automating the changes and administering the server as easy as
+# possible.
+
+# It is split into several files forming the configuration hierarchy outlined
+# below, all located in the /etc/apache2/ directory:
+#
+#	/etc/apache2/
+#	|-- apache2.conf
+#	|	`--  ports.conf
+#	|-- mods-enabled
+#	|	|-- *.load
+#	|	`-- *.conf
+#	|-- conf-enabled
+#	|	`-- *.conf
+# 	`-- sites-enabled
+#	 	`-- *.conf
+#
+#
+# * apache2.conf is the main configuration file (this file). It puts the pieces
+#   together by including all remaining configuration files when starting up the
+#   web server.
+#
+# * ports.conf is always included from the main configuration file. It is
+#   supposed to determine listening ports for incoming connections which can be
+#   customized anytime.
+#
+# * Configuration files in the mods-enabled/, conf-enabled/ and sites-enabled/
+#   directories contain particular configuration snippets which manage modules,
+#   global configuration fragments, or virtual host configurations,
+#   respectively.
+#
+#   They are activated by symlinking available configuration files from their
+#   respective *-available/ counterparts. These should be managed by using our
+#   helpers a2enmod/a2dismod, a2ensite/a2dissite and a2enconf/a2disconf. See
+#   their respective man pages for detailed information.
+#
+# * The binary is called apache2. Due to the use of environment variables, in
+#   the default configuration, apache2 needs to be started/stopped with
+#   /etc/init.d/apache2 or apache2ctl. Calling /usr/bin/apache2 directly will not
+#   work with the default configuration.
+
+
+# Global configuration
+#
+
+#
+# ServerRoot: The top of the directory tree under which the server's
+# configuration, error, and log files are kept.
+#
+# NOTE!  If you intend to place this on an NFS (or otherwise network)
+# mounted filesystem then please read the Mutex documentation (available
+# at <URL:http://httpd.apache.org/docs/2.4/mod/core.html#mutex>);
+# you will save yourself a lot of trouble.
+#
+# Do NOT add a slash at the end of the directory path.
+#
+#ServerRoot "/etc/apache2"
+
+#
+# The accept serialization lock file MUST BE STORED ON A LOCAL DISK.
+#
+Mutex file:${APACHE_LOCK_DIR} default
+
+#
+# PidFile: The file in which the server should record its process
+# identification number when it starts.
+# This needs to be set in /etc/apache2/envvars
+#
+PidFile ${APACHE_PID_FILE}
+
+#
+# Timeout: The number of seconds before receives and sends time out.
+#
+Timeout 300
+
+#
+# KeepAlive: Whether or not to allow persistent connections (more than
+# one request per connection). Set to "Off" to deactivate.
+#
+KeepAlive On
+
+#
+# MaxKeepAliveRequests: The maximum number of requests to allow
+# during a persistent connection. Set to 0 to allow an unlimited amount.
+# We recommend you leave this number high, for maximum performance.
+#
+MaxKeepAliveRequests 100
+
+#
+# KeepAliveTimeout: Number of seconds to wait for the next request from the
+# same client on the same connection.
+#
+KeepAliveTimeout 5
+
+
+# These need to be set in /etc/apache2/envvars
+User ${APACHE_RUN_USER}
+Group ${APACHE_RUN_GROUP}
+
+#
+# HostnameLookups: Log the names of clients or just their IP addresses
+# e.g., www.apache.org (on) or 204.62.129.132 (off).
+# The default is off because it'd be overall better for the net if people
+# had to knowingly turn this feature on, since enabling it means that
+# each client request will result in AT LEAST one lookup request to the
+# nameserver.
+#
+HostnameLookups Off
+
+# ErrorLog: The location of the error log file.
+# If you do not specify an ErrorLog directive within a <VirtualHost>
+# container, error messages relating to that virtual host will be
+# logged here.  If you *do* define an error logfile for a <VirtualHost>
+# container, that host's errors will be logged there and not here.
+#
+ErrorLog ${APACHE_LOG_DIR}/error.log
+
+#
+# LogLevel: Control the severity of messages logged to the error_log.
+# Available values: trace8, ..., trace1, debug, info, notice, warn,
+# error, crit, alert, emerg.
+# It is also possible to configure the log level for particular modules, e.g.
+# "LogLevel info ssl:warn"
+#
+LogLevel warn
+
+# Include module configuration:
+IncludeOptional mods-enabled/*.load
+IncludeOptional mods-enabled/*.conf
+
+# Include list of ports to listen on
+Include ports.conf
+
+
+# Sets the default security model of the Apache2 HTTPD server. It does
+# not allow access to the root filesystem outside of /usr/share and /var/www.
+# The former is used by web applications packaged in Debian,
+# the latter may be used for local directories served by the web server. If
+# your system is serving content from a sub-directory in /srv you must allow
+# access here, or in any related virtual host.
+<Directory />
+	Options FollowSymLinks
+	AllowOverride None
+	Require all denied
+</Directory>
+
+<Directory /usr/share>
+	AllowOverride None
+	Require all granted
+</Directory>
+
+<Directory /var/www/>
+	Options Indexes FollowSymLinks
+	AllowOverride None
+	Require all granted
+</Directory>
+
+#<Directory /srv/>
+#	Options Indexes FollowSymLinks
+#	AllowOverride None
+#	Require all granted
+#</Directory>
+
+
+
+
+# AccessFileName: The name of the file to look for in each directory
+# for additional configuration directives.  See also the AllowOverride
+# directive.
+#
+AccessFileName .htaccess
+
+#
+# The following lines prevent .htaccess and .htpasswd files from being
+# viewed by Web clients.
+#
+<FilesMatch "^\.ht">
+	Require all denied
+</FilesMatch>
+
+
+#
+# The following directives define some format nicknames for use with
+# a CustomLog directive.
+#
+# These deviate from the Common Log Format definitions in that they use %O
+# (the actual bytes sent including headers) instead of %b (the size of the
+# requested file), because the latter makes it impossible to detect partial
+# requests.
+#
+# Note that the use of %{X-Forwarded-For}i instead of %h is not recommended.
+# Use mod_remoteip instead.
+#
+LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined
+LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined
+LogFormat "%h %l %u %t \"%r\" %>s %O" common
+LogFormat "%{Referer}i -> %U" referer
+LogFormat "%{User-agent}i" agent
+
+# Include of directories ignores editors' and dpkg's backup files,
+# see README.Debian for details.
+
+# Include generic snippets of statements
+IncludeOptional conf-enabled/*.conf
+
+# Include the virtual host configurations:
+IncludeOptional sites-enabled/*.conf
+
+# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
+ServerName localhost 
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/ceilometer.conf.j2 b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/ceilometer.conf.j2
new file mode 100644
index 0000000..b89a3d9
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/ceilometer.conf.j2
@@ -0,0 +1,960 @@
+[DEFAULT]
+rpc_backend = rabbit
+auth_strategy = keystone
+debug = True
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+#
+# From oslo.messaging
+#
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+#rpc_conn_pool_size = 30
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address. (string value)
+#rpc_zmq_bind_address = *
+
+# MatchMaker driver. (string value)
+# Allowed values: redis, dummy
+#rpc_zmq_matchmaker = redis
+
+# Type of concurrency used. Either "native" or "eventlet" (string value)
+#rpc_zmq_concurrency = eventlet
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts = 1
+
+# Maximum number of ingress messages to locally buffer per topic. Default is
+# unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir = /var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match
+# "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
+
+# Seconds to wait before a cast expires (TTL). The default value of -1
+# specifies an infinite linger period. The value of 0 specifies no linger
+# period. Pending messages shall be discarded immediately when the socket is
+# closed. Only supported by impl_zmq. (integer value)
+#rpc_cast_timeout = -1
+
+# The default number of seconds that poll should wait. Poll raises timeout
+# exception when timeout expired. (integer value)
+#rpc_poll_timeout = 1
+
+# Expiration timeout in seconds of a name service record about existing target
+# ( < 0 means no timeout). (integer value)
+#zmq_target_expire = 120
+
+# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean
+# value)
+#use_pub_sub = true
+
+# Minimal port number for random ports range. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#rpc_zmq_min_port = 49152
+
+# Maximal port number for random ports range. (integer value)
+# Minimum value: 1
+# Maximum value: 65536
+#rpc_zmq_max_port = 65536
+
+# Number of retries to find free port number before fail with ZMQBindError.
+# (integer value)
+#rpc_zmq_bind_port_retries = 100
+
+# Size of executor thread pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
+#executor_thread_pool_size = 64
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout = 60
+
+# A URL representing the messaging driver to use and its full configuration. If
+# not set, we fall back to the rpc_backend option and driver specific
+# configuration. (string value)
+#transport_url = <None>
+
+# The messaging driver to use, defaults to rabbit. Other drivers include amqp
+# and zmq. (string value)
+#rpc_backend = rabbit
+
+# The default exchange under which topics are scoped. May be overridden by an
+# exchange name specified in the transport_url option. (string value)
+#control_exchange = openstack
+
+#
+# From oslo.service.service
+#
+
+# Enable eventlet backdoor.  Acceptable values are 0, <port>, and
+# <start>:<end>, where 0 results in listening on a random tcp port number;
+# <port> results in listening on the specified port number (and not enabling
+# backdoor if that port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range of port numbers.
+# The chosen port is displayed in the service's log file. (string value)
+#backdoor_port = <None>
+
+# Enables or disables logging values of all registered options when starting a
+# service (at DEBUG level). (boolean value)
+#log_options = true
+
+# Specify a timeout after which a gracefully shutdown server will exit. Zero
+# value means endless wait. (integer value)
+#graceful_shutdown_timeout = 60
+
+
+[cors]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# requests "origin" header. (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,POST,PUT,DELETE,OPTIONS
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+
+
+[cors.subdomain]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# requests "origin" header. (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,POST,PUT,DELETE,OPTIONS
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+
+
+[database]
+#connection = mongodb://ceilometer:password@localhost:27017/ceilometer
+metering_connection = mongodb://localhost:27017/ceilometer
+event_connection = mongodb://localhost:27017/ceilometer
+
+#
+# From oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+# Deprecated group/name - [DEFAULT]/sqlite_db
+#sqlite_db = oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database. (string
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+
+# The SQLAlchemy connection string to use to connect to the slave database.
+# (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including the
+# default, overrides any server-set SQL mode. To use whatever SQL mode is set
+# by the server configuration, set this to no value. Example: mysql_sql_mode=
+# (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum number of database connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection lost.
+# (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database operation up to
+# db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries of a
+# database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before error is
+# raised. Set to -1 to specify an infinite retry count. (integer value)
+#db_max_retries = 20
+
+
+[keystone_authtoken]
+auth_uri = http://localhost:5000
+auth_url = http://localhost:35357
+memcached_servers = localhost:11211
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = ceilometer
+password = password
+
+#
+# From keystonemiddleware.auth_token
+#
+
+# Complete public Identity API endpoint. (string value)
+#auth_uri = <None>
+
+# API version of the admin Identity API endpoint. (string value)
+#auth_version = <None>
+
+# Do not handle authorization requests within the middleware, but delegate the
+# authorization decision to downstream WSGI components. (boolean value)
+#delay_auth_decision = false
+
+# Request timeout value for communicating with Identity API server. (integer
+# value)
+#http_connect_timeout = <None>
+
+# How many times are we trying to reconnect when communicating with Identity
+# API Server. (integer value)
+#http_request_max_retries = 3
+
+# Env key for the swift cache. (string value)
+#cache = <None>
+
+# Required if identity server requires client certificate (string value)
+#certfile = <None>
+
+# Required if identity server requires client certificate (string value)
+#keyfile = <None>
+
+# A PEM encoded Certificate Authority to use when verifying HTTPs connections.
+# Defaults to system CAs. (string value)
+#cafile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# The region in which the identity server can be found. (string value)
+#region_name = <None>
+
+# Directory used to cache files related to PKI tokens. (string value)
+#signing_dir = <None>
+
+# Optionally specify a list of memcached server(s) to use for caching. If left
+# undefined, tokens will instead be cached in-process. (list value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers = <None>
+
+# In order to prevent excessive effort spent validating tokens, the middleware
+# caches previously-seen tokens for a configurable duration (in seconds). Set
+# to -1 to disable caching completely. (integer value)
+#token_cache_time = 300
+
+# Determines the frequency at which the list of revoked tokens is retrieved
+# from the Identity service (in seconds). A high number of revocation events
+# combined with a low cache duration may significantly reduce performance.
+# (integer value)
+#revocation_cache_time = 10
+
+# (Optional) If defined, indicate whether token data should be authenticated or
+# authenticated and encrypted. If MAC, token data is authenticated (with HMAC)
+# in the cache. If ENCRYPT, token data is encrypted and authenticated in the
+# cache. If the value is not one of these options or empty, auth_token will
+# raise an exception on initialization. (string value)
+# Allowed values: None, MAC, ENCRYPT
+#memcache_security_strategy = None
+
+# (Optional, mandatory if memcache_security_strategy is defined) This string is
+# used for key derivation. (string value)
+#memcache_secret_key = <None>
+
+# (Optional) Number of seconds memcached server is considered dead before it is
+# tried again. (integer value)
+#memcache_pool_dead_retry = 300
+
+# (Optional) Maximum total number of open connections to every memcached
+# server. (integer value)
+#memcache_pool_maxsize = 10
+
+# (Optional) Socket timeout in seconds for communicating with a memcached
+# server. (integer value)
+#memcache_pool_socket_timeout = 3
+
+# (Optional) Number of seconds a connection to memcached is held unused in the
+# pool before it is closed. (integer value)
+#memcache_pool_unused_timeout = 60
+
+# (Optional) Number of seconds that an operation will wait to get a memcached
+# client connection from the pool. (integer value)
+#memcache_pool_conn_get_timeout = 10
+
+# (Optional) Use the advanced (eventlet safe) memcached client pool. The
+# advanced pool will only work under python 2.x. (boolean value)
+#memcache_use_advanced_pool = false
+
+# (Optional) Indicate whether to set the X-Service-Catalog header. If False,
+# middleware will not ask for service catalog on token validation and will not
+# set the X-Service-Catalog header. (boolean value)
+#include_service_catalog = true
+
+# Used to control the use and type of token binding. Can be set to: "disabled"
+# to not check token binding. "permissive" (default) to validate binding
+# information if the bind type is of a form known to the server and ignore it
+# if not. "strict" like "permissive" but if the bind type is unknown the token
+# will be rejected. "required" any form of token binding is needed to be
+# allowed. Finally the name of a binding method that must be present in tokens.
+# (string value)
+#enforce_token_bind = permissive
+
+# If true, the revocation list will be checked for cached tokens. This requires
+# that PKI tokens are configured on the identity server. (boolean value)
+#check_revocations_for_cached = false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a single algorithm
+# or multiple. The algorithms are those supported by Python standard
+# hashlib.new(). The hashes will be tried in the order given, so put the
+# preferred one first for performance. The result of the first hash will be
+# stored in the cache. This will typically be set to multiple values only while
+# migrating from a less secure algorithm to a more secure one. Once all the old
+# tokens are expired this option should be set to a single value for better
+# performance. (list value)
+#hash_algorithms = md5
+
+# Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
+# (string value)
+#auth_admin_prefix =
+
+# Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
+# (string value)
+#auth_host = 127.0.0.1
+
+# Port of the admin Identity API endpoint. Deprecated, use identity_uri.
+# (integer value)
+#auth_port = 35357
+
+# Protocol of the admin Identity API endpoint. Deprecated, use identity_uri.
+# (string value)
+# Allowed values: http, https
+#auth_protocol = https
+
+# Complete admin Identity API endpoint. This should specify the unversioned
+# root endpoint e.g. https://localhost:35357/ (string value)
+#identity_uri = <None>
+
+# This option is deprecated and may be removed in a future release. Single
+# shared secret with the Keystone configuration used for bootstrapping a
+# Keystone installation, or otherwise bypassing the normal authentication
+# process. This option should not be used, use `admin_user` and
+# `admin_password` instead. (string value)
+#admin_token = <None>
+
+# Service username. (string value)
+#admin_user = <None>
+
+# Service user password. (string value)
+#admin_password = <None>
+
+# Service tenant name. (string value)
+#admin_tenant_name = admin
+
+# Authentication type to load (unknown value)
+# Deprecated group/name - [DEFAULT]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (unknown value)
+#auth_section = <None>
+
+
+[matchmaker_redis]
+
+#
+# From oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host = 127.0.0.1
+
+# Use this port to connect to redis host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#port = 6379
+
+# Password for Redis server (optional). (string value)
+#password =
+
+# List of Redis Sentinel hosts (fault tolerance mode) e.g.
+# [host:port, host1:port ... ] (list value)
+#sentinel_hosts =
+
+# Redis replica set name. (string value)
+#sentinel_group_name = oslo-messaging-zeromq
+
+# Time in ms to wait between connection attempts. (integer value)
+#wait_timeout = 500
+
+# Time in ms to wait before the transaction is killed. (integer value)
+#check_timeout = 20000
+
+# Timeout in ms on blocking socket operations (integer value)
+#socket_timeout = 1000
+
+
+[oslo_concurrency]
+
+#
+# From oslo.concurrency
+#
+
+# Enables or disables inter-process locks. (boolean value)
+# Deprecated group/name - [DEFAULT]/disable_process_locking
+#disable_process_locking = false
+
+# Directory to use for lock files.  For security, the specified directory
+# should only be writable by the user running the processes that need locking.
+# Defaults to environment variable OSLO_LOCK_PATH. If OSLO_LOCK_PATH is not set
+# in the environment, use the Python tempfile.gettempdir function to find a
+# suitable location. If external locks are used, a lock path must be set.
+# (string value)
+# Deprecated group/name - [DEFAULT]/lock_path
+#lock_path = /tmp
+
+
+[oslo_messaging_amqp]
+
+#
+# From oslo.messaging
+#
+
+# address prefix used when sending to a specific server (string value)
+# Deprecated group/name - [amqp1]/server_request_prefix
+#server_request_prefix = exclusive
+
+# address prefix used when broadcasting to all servers (string value)
+# Deprecated group/name - [amqp1]/broadcast_prefix
+#broadcast_prefix = broadcast
+
+# address prefix when sending to any server in group (string value)
+# Deprecated group/name - [amqp1]/group_request_prefix
+#group_request_prefix = unicast
+
+# Name for the AMQP container (string value)
+# Deprecated group/name - [amqp1]/container_name
+#container_name = <None>
+
+# Timeout for inactive connections (in seconds) (integer value)
+# Deprecated group/name - [amqp1]/idle_timeout
+#idle_timeout = 0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+# Deprecated group/name - [amqp1]/trace
+#trace = false
+
+# CA certificate PEM file to verify server certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_ca_file
+#ssl_ca_file =
+
+# Identifying certificate PEM file to present to clients (string value)
+# Deprecated group/name - [amqp1]/ssl_cert_file
+#ssl_cert_file =
+
+# Private key PEM file used to sign cert_file certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_key_file
+#ssl_key_file =
+
+# Password for decrypting ssl_key_file (if encrypted) (string value)
+# Deprecated group/name - [amqp1]/ssl_key_password
+#ssl_key_password = <None>
+
+# Accept clients using either SSL or plain TCP (boolean value)
+# Deprecated group/name - [amqp1]/allow_insecure_clients
+#allow_insecure_clients = false
+
+# Space separated list of acceptable SASL mechanisms (string value)
+# Deprecated group/name - [amqp1]/sasl_mechanisms
+#sasl_mechanisms =
+
+# Path to directory that contains the SASL configuration (string value)
+# Deprecated group/name - [amqp1]/sasl_config_dir
+#sasl_config_dir =
+
+# Name of configuration file (without .conf suffix) (string value)
+# Deprecated group/name - [amqp1]/sasl_config_name
+#sasl_config_name =
+
+# User name for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/username
+#username =
+
+# Password for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/password
+#password =
+
+
+[oslo_messaging_notifications]
+
+#
+# From oslo.messaging
+#
+
+# The Driver(s) to handle sending notifications. Possible values are
+# messaging, messagingv2, routing, log, test, noop (multi valued)
+# Deprecated group/name - [DEFAULT]/notification_driver
+#driver =
+
+# A URL representing the messaging driver to use for notifications. If not set,
+# we fall back to the same configuration used for RPC. (string value)
+# Deprecated group/name - [DEFAULT]/notification_transport_url
+#transport_url = <None>
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+# Deprecated group/name - [DEFAULT]/notification_topics
+#topics = notifications
+
+
+[oslo_messaging_rabbit]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_durable_queues
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_auto_delete
+#amqp_auto_delete = false
+
+# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
+# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
+# distributions. (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_version
+#kombu_ssl_version =
+
+# SSL key file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
+#kombu_ssl_keyfile =
+
+# SSL cert file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
+#kombu_ssl_certfile =
+
+# SSL certification authority file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
+#kombu_ssl_ca_certs =
+
+# How long to wait before reconnecting in response to an AMQP consumer cancel
+# notification. (floating point value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
+#kombu_reconnect_delay = 1.0
+
+# EXPERIMENTAL: Possible values are: gzip, bz2. If not set, compression will
+# not be used. This option may not be available in future versions. (string value)
+#kombu_compression = <None>
+
+# How long to wait for a missing client before abandoning the attempt to send
+# it its replies. This value should not be longer than rpc_response_timeout.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout
+#kombu_missing_consumer_retry_timeout = 60
+
+# Determines how the next RabbitMQ node is chosen in case the one we are
+# currently connected to becomes unavailable. Takes effect only if more than
+# one RabbitMQ node is provided in config. (string value)
+# Allowed values: round-robin, shuffle
+#kombu_failover_strategy = round-robin
+
+# The RabbitMQ broker address where a single node is used. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_host
+#rabbit_host = localhost
+
+# The RabbitMQ broker port where a single node is used. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# Deprecated group/name - [DEFAULT]/rabbit_port
+#rabbit_port = 5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+# Deprecated group/name - [DEFAULT]/rabbit_hosts
+#rabbit_hosts = $rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
+#rabbit_use_ssl = false
+
+# The RabbitMQ userid. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_userid
+#rabbit_userid = guest
+
+# The RabbitMQ password. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_password
+#rabbit_password = guest
+
+# The RabbitMQ login method. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_login_method
+#rabbit_login_method = AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
+#rabbit_virtual_host = /
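+
+# Illustrative example (not part of the upstream sample; the hostname and
+# credentials below are assumptions): pointing this service at a standalone
+# RabbitMQ broker might look like:
+#   rabbit_host = controller
+#   rabbit_port = 5672
+#   rabbit_userid = openstack
+#   rabbit_password = RABBIT_PASS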
+
+# How frequently to retry connecting with RabbitMQ. (integer value)
+#rabbit_retry_interval = 1
+
+# How long to backoff for between retries when connecting to RabbitMQ. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
+#rabbit_retry_backoff = 2
+
+# Maximum interval of RabbitMQ connection retries. Default is 30 seconds.
+# (integer value)
+#rabbit_interval_max = 30
+
+# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry
+# count). (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_max_retries
+#rabbit_max_retries = 0
+
+# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this
+# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring
+# is no longer controlled by the x-ha-policy argument when declaring a queue.
+# If you just want to make sure that all queues (except those with auto-
+# generated names) are mirrored across all nodes, run: "rabbitmqctl set_policy
+# HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
+#rabbit_ha_queues = false
+
+# Positive integer representing duration in seconds for queue TTL (x-expires).
+# Queues which are unused for the duration of the TTL are automatically
+# deleted. The parameter affects only reply and fanout queues. (integer value)
+# Minimum value: 1
+#rabbit_transient_queues_ttl = 1800
+
+# Specifies the number of messages to prefetch. Setting to zero allows
+# unlimited messages. (integer value)
+#rabbit_qos_prefetch_count = 0
+
+# Number of seconds after which the Rabbit broker is considered down if
+# heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL
+# (integer value)
+#heartbeat_timeout_threshold = 60
+
+# How many times during the heartbeat_timeout_threshold we check the
+# heartbeat. (integer value)
+#heartbeat_rate = 2
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
+# Deprecated group/name - [DEFAULT]/fake_rabbit
+#fake_rabbit = false
+
+# Maximum number of channels to allow (integer value)
+#channel_max = <None>
+
+# The maximum byte size for an AMQP frame (integer value)
+#frame_max = <None>
+
+# How often to send heartbeats for consumer's connections (integer value)
+#heartbeat_interval = 1
+
+# Enable SSL (boolean value)
+#ssl = <None>
+
+# Arguments passed to ssl.wrap_socket (dict value)
+#ssl_options = <None>
+
+# Set socket timeout in seconds for connection's socket (floating point value)
+#socket_timeout = 0.25
+
+# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating point
+# value)
+#tcp_user_timeout = 0.25
+
+# Set delay for reconnection to some host which has connection error (floating
+# point value)
+#host_connection_reconnect_delay = 0.25
+
+# Maximum number of connections to keep queued. (integer value)
+#pool_max_size = 10
+
+# Maximum number of connections to create above `pool_max_size`. (integer
+# value)
+#pool_max_overflow = 0
+
+# Default number of seconds to wait for a connection to become available
+# (integer value)
+#pool_timeout = 30
+
+# Lifetime of a connection (since creation) in seconds or None for no
+# recycling. Expired connections are closed on acquire. (integer value)
+#pool_recycle = 600
+
+# Threshold at which inactive (since release) connections are considered stale
+# in seconds or None for no staleness. Stale connections are closed on acquire.
+# (integer value)
+#pool_stale = 60
+
+# Persist notification messages. (boolean value)
+#notification_persistence = false
+
+# Exchange name for sending notifications (string value)
+#default_notification_exchange = ${control_exchange}_notification
+
+# Maximum number of unacknowledged messages which RabbitMQ can send to the
+# notification listener. (integer value)
+#notification_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during sending
+# notification, -1 means infinite retry. (integer value)
+#default_notification_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending
+# notification message (floating point value)
+#notification_retry_delay = 0.25
+
+# Time to live for rpc queues without consumers in seconds. (integer value)
+#rpc_queue_expiration = 60
+
+# Exchange name for sending RPC messages (string value)
+#default_rpc_exchange = ${control_exchange}_rpc
+
+# Exchange name for receiving RPC replies (string value)
+#rpc_reply_exchange = ${control_exchange}_rpc_reply
+
+# Maximum number of unacknowledged messages which RabbitMQ can send to the rpc
+# listener. (integer value)
+#rpc_listener_prefetch_count = 100
+
+# Maximum number of unacknowledged messages which RabbitMQ can send to the rpc
+# reply listener. (integer value)
+#rpc_reply_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during sending
+# reply. -1 means infinite retry during rpc_timeout (integer value)
+#rpc_reply_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending
+# reply. (floating point value)
+#rpc_reply_retry_delay = 0.25
+
+# Reconnecting retry count in case of connectivity problem during sending RPC
+# message, -1 means infinite retry. If the actual retry attempt count is not 0,
+# the rpc request could be processed more than one time (integer value)
+#default_rpc_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending RPC
+# message (floating point value)
+#rpc_retry_delay = 0.25
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# The JSON file that defines policies. (string value)
+# Deprecated group/name - [DEFAULT]/policy_file
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string value)
+# Deprecated group/name - [DEFAULT]/policy_default_rule
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be relative
+# to any directory in the search path defined by the config_dir option, or
+# absolute paths. The file defined by policy_file must exist for these
+# directories to be searched. Missing or empty directories are ignored. (multi
+# valued)
+# Deprecated group/name - [DEFAULT]/policy_dirs
+#policy_dirs = policy.d
+
+
+[service_credentials]
+auth_type = password
+auth_url = http://localhost:5000/v3
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = ceilometer
+password = password
+interface = internalURL
+region_name = RegionOne
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/keystone.conf.j2 b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/keystone.conf.j2
new file mode 100644
index 0000000..fe6d4fc
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/keystone.conf.j2
@@ -0,0 +1,2100 @@
+[DEFAULT]
+
+#
+# From keystone
+#
+
+# A "shared secret" that can be used to bootstrap Keystone. This "token" does
+# not represent a user, and carries no explicit authorization. If set to
+# `None`, the value is ignored and the `admin_token` log in mechanism is
+# effectively disabled. To completely disable `admin_token` in production
+# (highly recommended), remove AdminTokenAuthMiddleware from your paste
+# application pipelines (for example, in keystone-paste.ini). (string value)
+#admin_token = <None>
+admin_token = ADMIN_TOKEN
+
+# The base public endpoint URL for Keystone that is advertised to clients
+# (NOTE: this does NOT affect how Keystone listens for connections). Defaults
+# to the base host URL of the request. E.g. a request to
+# http://server:5000/v3/users will default to http://server:5000. You should
+# only need to set this value if the base URL contains a path (e.g. /prefix/v3)
+# or the endpoint should be found on a different server. (string value)
+#public_endpoint = <None>
+
+# The base admin endpoint URL for Keystone that is advertised to clients (NOTE:
+# this does NOT affect how Keystone listens for connections). Defaults to the
+# base host URL of the request. E.g. a request to http://server:35357/v3/users
+# will default to http://server:35357. You should only need to set this value
+# if the base URL contains a path (e.g. /prefix/v3) or the endpoint should be
+# found on a different server. (string value)
+#admin_endpoint = <None>
+
+# Maximum depth of the project hierarchy, excluding the project acting as a
+# domain at the top of the hierarchy. WARNING: setting it to a large value may
+# adversely impact  performance. (integer value)
+#max_project_tree_depth = 5
+
+# Limit the sizes of user & project ID/names. (integer value)
+#max_param_size = 64
+
+# Similar to max_param_size, but provides an exception for token values.
+# (integer value)
+#max_token_size = 8192
+
+# Similar to the member_role_name option, this represents the default role ID
+# used to associate users with their default projects in the v2 API. This will
+# be used as the explicit role where one is not specified by the v2 API.
+# (string value)
+#member_role_id = 9fe2ff9ee4384b1894a90878d3e92bab
+
+# This is the role name used in combination with the member_role_id option; see
+# that option for more detail. (string value)
+#member_role_name = _member_
+
+# The value passed as the keyword "rounds" to passlib's encrypt method.
+# (integer value)
+# Minimum value: 1000
+# Maximum value: 100000
+#crypt_strength = 10000
+
+# The maximum number of entities that will be returned in a collection, with no
+# limit set by default. This global limit may be then overridden for a specific
+# driver, by specifying a list_limit in the appropriate section (e.g.
+# [assignment]). (integer value)
+#list_limit = <None>
+
+# Set this to false if you want to enable the ability for user, group and
+# project entities to be moved between domains by updating their domain_id.
+# Allowing such movement is not recommended if the scope of a domain admin is
+# being restricted by use of an appropriate policy file (see
+# policy.v3cloudsample as an example). This ability is deprecated and will be
+# removed in a future release. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#domain_id_immutable = true
+
+# If set to true, strict password length checking is performed for password
+# manipulation. If a password exceeds the maximum length, the operation will
+# fail with an HTTP 403 Forbidden error. If set to false, passwords are
+# automatically truncated to the maximum length. (boolean value)
+#strict_password_check = false
+
+# The HTTP header used to determine the scheme for the original request, even
+# if it was removed by an SSL terminating proxy. (string value)
+#secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO
+
+# If set to true the server will return information in the response that may
+# allow an unauthenticated or authenticated user to get more information than
+# normal, such as why authentication failed. This may be useful for debugging
+# but is insecure. (boolean value)
+#insecure_debug = false
+
+#
+# From keystone.notifications
+#
+
+# Default publisher_id for outgoing notifications (string value)
+#default_publisher_id = <None>
+
+# Define the notification format for Identity Service events. A "basic"
+# notification has information about the resource being operated on. A "cadf"
+# notification has the same information, as well as information about the
+# initiator of the event. (string value)
+# Allowed values: basic, cadf
+#notification_format = basic
+
+# Define the notification options to opt-out from. The value expected is:
+# identity.<resource_type>.<operation>. This field can be set multiple times in
+# order to add more notifications to opt-out from. For example:
+#  notification_opt_out=identity.user.created
+#  notification_opt_out=identity.authenticate.success (multi valued)
+#notification_opt_out =
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+log_dir = /var/log/keystone
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+#
+# From oslo.messaging
+#
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+#rpc_conn_pool_size = 30
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address. (string value)
+#rpc_zmq_bind_address = *
+
+# MatchMaker driver. (string value)
+# Allowed values: redis, dummy
+#rpc_zmq_matchmaker = redis
+
+# Type of concurrency used. Either "native" or "eventlet" (string value)
+#rpc_zmq_concurrency = eventlet
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts = 1
+
+# Maximum number of ingress messages to locally buffer per topic. Default is
+# unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir = /var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match
+# "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
+
+# Seconds to wait before a cast expires (TTL). The default value of -1
+# specifies an infinite linger period. The value of 0 specifies no linger
+# period. Pending messages shall be discarded immediately when the socket is
+# closed. Only supported by impl_zmq. (integer value)
+#rpc_cast_timeout = -1
+
+# The default number of seconds that poll should wait. Poll raises timeout
+# exception when timeout expired. (integer value)
+#rpc_poll_timeout = 1
+
+# Expiration timeout in seconds of a name service record about existing target
+# ( < 0 means no timeout). (integer value)
+#zmq_target_expire = 120
+
+# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean
+# value)
+#use_pub_sub = true
+
+# Minimal port number for random ports range. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#rpc_zmq_min_port = 49152
+
+# Maximal port number for random ports range. (integer value)
+# Minimum value: 1
+# Maximum value: 65536
+#rpc_zmq_max_port = 65536
+
+# Number of retries to find free port number before fail with ZMQBindError.
+# (integer value)
+#rpc_zmq_bind_port_retries = 100
+
+# Size of executor thread pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
+#executor_thread_pool_size = 64
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout = 60
+
+# A URL representing the messaging driver to use and its full configuration. If
+# not set, we fall back to the rpc_backend option and driver specific
+# configuration. (string value)
+#transport_url = <None>
+
+# The messaging driver to use, defaults to rabbit. Other drivers include amqp
+# and zmq. (string value)
+#rpc_backend = rabbit
+
+# The default exchange under which topics are scoped. May be overridden by an
+# exchange name specified in the transport_url option. (string value)
+#control_exchange = keystone
+
+#
+# From oslo.service.service
+#
+
+# Enable eventlet backdoor.  Acceptable values are 0, <port>, and
+# <start>:<end>, where 0 results in listening on a random tcp port number;
+# <port> results in listening on the specified port number (and not enabling
+# backdoor if that port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range of port numbers.
+# The chosen port is displayed in the service's log file. (string value)
+#backdoor_port = <None>
+
+# Enable eventlet backdoor, using the provided path as a unix socket that can
+# receive connections. This option is mutually exclusive with 'backdoor_port'
+# in that only one should be provided. If both are provided then the existence
+# of this option overrides the usage of that option. (string value)
+#backdoor_socket = <None>
+
+# Enables or disables logging values of all registered options when starting a
+# service (at DEBUG level). (boolean value)
+#log_options = true
+
+# Specify a timeout after which a gracefully shutdown server will exit. Zero
+# value means endless wait. (integer value)
+#graceful_shutdown_timeout = 60
+
+
+[assignment]
+
+#
+# From keystone
+#
+
+# Entrypoint for the assignment backend driver in the keystone.assignment
+# namespace. Only an SQL driver is supplied. If an assignment driver is not
+# specified, the identity driver will choose the assignment driver (driver
+# selection based on `[identity]/driver` option is deprecated and will be
+# removed in the "O" release). (string value)
+#driver = <None>
+
+# A list of role names which are prohibited from being an implied role. (list
+# value)
+#prohibited_implied_role = admin
+
+
+[auth]
+
+#
+# From keystone
+#
+
+# Allowed authentication methods. (list value)
+#methods = external,password,token,oauth1
+
+# Entrypoint for the password auth plugin module in the keystone.auth.password
+# namespace. (string value)
+#password = <None>
+
+# Entrypoint for the token auth plugin module in the keystone.auth.token
+# namespace. (string value)
+#token = <None>
+
+# Entrypoint for the external (REMOTE_USER) auth plugin module in the
+# keystone.auth.external namespace. Supplied drivers are DefaultDomain and
+# Domain. The default driver is DefaultDomain. (string value)
+#external = <None>
+
+# Entrypoint for the oAuth1.0 auth plugin module in the keystone.auth.oauth1
+# namespace. (string value)
+#oauth1 = <None>
+
+
+[cache]
+
+#
+# From oslo.cache
+#
+
+# Prefix for building the configuration dictionary for the cache region. This
+# should not need to be changed unless there is another dogpile.cache region
+# with the same configuration name. (string value)
+#config_prefix = cache.oslo
+
+# Default TTL, in seconds, for any cached item in the dogpile.cache region.
+# This applies to any cached method that doesn't have an explicit cache
+# expiration time defined for it. (integer value)
+#expiration_time = 600
+
+# Dogpile.cache backend module. It is recommended that Memcache with pooling
+# (oslo_cache.memcache_pool) or Redis (dogpile.cache.redis) be used in
+# production deployments.  Small workloads (single process) like devstack can
+# use the dogpile.cache.memory backend. (string value)
+#backend = dogpile.cache.null
+
+# Arguments supplied to the backend module. Specify this option once per
+# argument to be passed to the dogpile.cache backend. Example format:
+# "<argname>:<value>". (multi valued)
+#backend_argument =
+
+# Proxy classes to import that will affect the way the dogpile.cache backend
+# functions. See the dogpile.cache documentation on changing-backend-behavior.
+# (list value)
+#proxies =
+
+# Global toggle for caching. (boolean value)
+#enabled = false
+
+# Extra debugging from the cache backend (cache keys, get/set/delete/etc
+# calls). This is only really useful if you need to see the specific cache-
+# backend get/set/delete calls with the keys/values. Typically this should be
+# left set to false. (boolean value)
+#debug_cache_backend = false
+
+# Memcache servers in the format of "host:port". (dogpile.cache.memcache and
+# oslo_cache.memcache_pool backends only). (list value)
+#memcache_servers = localhost:11211
+
+# Number of seconds memcached server is considered dead before it is tried
+# again. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
+# (integer value)
+#memcache_dead_retry = 300
+
+# Timeout in seconds for every call to a server. (dogpile.cache.memcache and
+# oslo_cache.memcache_pool backends only). (integer value)
+#memcache_socket_timeout = 3
+
+# Max total number of open connections to every memcached server.
+# (oslo_cache.memcache_pool backend only). (integer value)
+#memcache_pool_maxsize = 10
+
+# Number of seconds a connection to memcached is held unused in the pool before
+# it is closed. (oslo_cache.memcache_pool backend only). (integer value)
+#memcache_pool_unused_timeout = 60
+
+# Number of seconds that an operation will wait to get a memcache client
+# connection. (integer value)
+#memcache_pool_connection_get_timeout = 10
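+
+# Illustrative example (not part of the upstream sample; the memcached
+# endpoint is an assumption): enabling memcached-backed caching for keystone
+# might look like:
+#   enabled = true
+#   backend = oslo_cache.memcache_pool
+#   memcache_servers = localhost:11211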
+
+
+[catalog]
+
+#
+# From keystone
+#
+
+# Catalog template file name for use with the template catalog backend. (string
+# value)
+#template_file = default_catalog.templates
+
+# Entrypoint for the catalog backend driver in the keystone.catalog namespace.
+# Supplied drivers are kvs, sql, templated, and endpoint_filter.sql (string
+# value)
+#driver = sql
+
+# Toggle for catalog caching. This has no effect unless global caching is
+# enabled. (boolean value)
+#caching = true
+
+# Time to cache catalog data (in seconds). This has no effect unless global and
+# catalog caching are enabled. (integer value)
+#cache_time = <None>
+
+# Maximum number of entities that will be returned in a catalog collection.
+# (integer value)
+#list_limit = <None>
+
+
+[cors]
+
+#
+# From oslo.middleware
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# requests "origin" header. (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Project-Id,X-Project-Name,X-Project-Domain-Id,X-Project-Domain-Name,X-Domain-Id,X-Domain-Name
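+
+# Illustrative example (not part of the upstream sample; the dashboard URL is
+# an assumption): allowing a browser-based dashboard to call this API
+# cross-origin might look like:
+#   allowed_origin = https://horizon.example.com
+#   allow_credentials = true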
+
+
+[cors.subdomain]
+
+#
+# From oslo.middleware
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# requests "origin" header. (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Project-Id,X-Project-Name,X-Project-Domain-Id,X-Project-Domain-Name,X-Domain-Id,X-Domain-Name
+
+
+[credential]
+
+#
+# From keystone
+#
+
+# Entrypoint for the credential backend driver in the keystone.credential
+# namespace. (string value)
+#driver = sql
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+# Deprecated group/name - [DEFAULT]/sqlite_db
+#sqlite_db = oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database. (string
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+#connection = sqlite:////var/lib/keystone/keystone.db
+
+connection = mysql+pymysql://keystone:password@localhost/keystone
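+# (Illustrative note, not part of the upstream sample: after pointing
+# "connection" at the database, the schema is typically populated with
+# "keystone-manage db_sync".)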
+
+# The SQLAlchemy connection string to use to connect to the slave database.
+# (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including the
+# default, overrides any server-set SQL mode. To use whatever SQL mode is set
+# by the server configuration, set this to no value. Example: mysql_sql_mode=
+# (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum number of database connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection lost.
+# (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database operation up to
+# db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries of a
+# database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before error is
+# raised. Set to -1 to specify an infinite retry count. (integer value)
+#db_max_retries = 20
+
+
+[domain_config]
+
+#
+# From keystone
+#
+
+# Entrypoint for the domain config backend driver in the
+# keystone.resource.domain_config namespace. (string value)
+#driver = sql
+
+# Toggle for domain config caching. This has no effect unless global caching is
+# enabled. (boolean value)
+#caching = true
+
+# TTL (in seconds) to cache domain config data. This has no effect unless
+# domain config caching is enabled. (integer value)
+#cache_time = 300
+
+
+[endpoint_filter]
+
+#
+# From keystone
+#
+
+# Entrypoint for the endpoint filter backend driver in the
+# keystone.endpoint_filter namespace. (string value)
+#driver = sql
+
+# Toggle to return all active endpoints if no filter exists. (boolean value)
+#return_all_endpoints_if_no_filter = true
+
+
+[endpoint_policy]
+
+#
+# From keystone
+#
+
+# Enable endpoint_policy functionality. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: The option to enable the OS-ENDPOINT-POLICY extension has been
+# deprecated in the M release and will be removed in the O release. The OS-
+# ENDPOINT-POLICY extension will be enabled by default.
+#enabled = true
+
+# Entrypoint for the endpoint policy backend driver in the
+# keystone.endpoint_policy namespace. (string value)
+#driver = sql
+
+
+[eventlet_server]
+
+#
+# From keystone
+#
+
+# The number of worker processes to serve the public eventlet application.
+# Defaults to number of CPUs (minimum of 2). (integer value)
+# Deprecated group/name - [DEFAULT]/public_workers
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#public_workers = <None>
+
+# The number of worker processes to serve the admin eventlet application.
+# Defaults to number of CPUs (minimum of 2). (integer value)
+# Deprecated group/name - [DEFAULT]/admin_workers
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#admin_workers = <None>
+
+# The IP address of the network interface for the public service to listen on.
+# (string value)
+# Deprecated group/name - [DEFAULT]/bind_host
+# Deprecated group/name - [DEFAULT]/public_bind_host
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#public_bind_host = 0.0.0.0
+
+# The port number which the public service listens on. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# Deprecated group/name - [DEFAULT]/public_port
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#public_port = 5000
+
+# The IP address of the network interface for the admin service to listen on.
+# (string value)
+# Deprecated group/name - [DEFAULT]/bind_host
+# Deprecated group/name - [DEFAULT]/admin_bind_host
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#admin_bind_host = 0.0.0.0
+
+# The port number which the admin service listens on. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# Deprecated group/name - [DEFAULT]/admin_port
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#admin_port = 35357
+
+# If set to false, disables keepalives on the server; all connections will be
+# closed after serving one request. (boolean value)
+#wsgi_keep_alive = true
+
+# Timeout for socket operations on a client connection. If an incoming
+# connection is idle for this number of seconds it will be closed. A value of
+# "0" means wait forever. (integer value)
+#client_socket_timeout = 900
+
+# Set this to true if you want to enable TCP_KEEPALIVE on server sockets, i.e.
+# sockets used by the Keystone wsgi server for client connections. (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/tcp_keepalive
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#tcp_keepalive = false
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server socket. Only
+# applies if tcp_keepalive is true. Ignored if system does not support it.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/tcp_keepidle
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#tcp_keepidle = 600
+
+
+[eventlet_server_ssl]
+
+#
+# From keystone
+#
+
+# Toggle for SSL support on the Keystone eventlet servers. (boolean value)
+# Deprecated group/name - [ssl]/enable
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#enable = false
+
+# Path of the certfile for SSL. For non-production environments, you may be
+# interested in using `keystone-manage ssl_setup` to generate self-signed
+# certificates. (string value)
+# Deprecated group/name - [ssl]/certfile
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#certfile = /etc/keystone/ssl/certs/keystone.pem
+
+# Path of the keyfile for SSL. (string value)
+# Deprecated group/name - [ssl]/keyfile
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#keyfile = /etc/keystone/ssl/private/keystonekey.pem
+
+# Path of the CA cert file for SSL. (string value)
+# Deprecated group/name - [ssl]/ca_certs
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#ca_certs = /etc/keystone/ssl/certs/ca.pem
+
+# Require client certificate. (boolean value)
+# Deprecated group/name - [ssl]/cert_required
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#cert_required = false
+
+
+[federation]
+
+#
+# From keystone
+#
+
+# Entrypoint for the federation backend driver in the keystone.federation
+# namespace. (string value)
+#driver = sql
+
+# Value to be used when filtering assertion parameters from the environment.
+# (string value)
+#assertion_prefix =
+
+# Value to be used to obtain the entity ID of the Identity Provider from the
+# environment (e.g. if using the mod_shib plugin this value is `Shib-Identity-
+# Provider`). (string value)
+#remote_id_attribute = <None>
+
+# A domain name that is reserved to allow federated ephemeral users to have a
+# domain concept. Note that an admin will not be able to create a domain with
+# this name or update an existing domain to this name. You are not advised to
+# change this value unless you really have to. (string value)
+#federated_domain_name = Federated
+
+# A list of trusted dashboard hosts. Before accepting a Single Sign-On request
+# to return a token, the origin host must be a member of the trusted_dashboard
+# list. This configuration option may be repeated for multiple values. For
+# example: trusted_dashboard=http://acme.com/auth/websso
+# trusted_dashboard=http://beta.com/auth/websso (multi valued)
+#trusted_dashboard =
+
+# Location of the Single Sign-On callback handler; it will return a token to a
+# trusted dashboard host. (string value)
+#sso_callback_template = /etc/keystone/sso_callback_template.html
+
+
+[fernet_tokens]
+
+#
+# From keystone
+#
+
+# Directory containing Fernet token keys. (string value)
+#key_repository = /etc/keystone/fernet-keys/
+
+# This controls how many keys are held in rotation by keystone-manage
+# fernet_rotate before they are discarded. The default value of 3 means that
+# keystone will maintain one staged key, one primary key, and one secondary
+# key. Increasing this value means that additional secondary keys will be kept
+# in the rotation. (integer value)
+#max_active_keys = 3
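+
+# Illustrative note (not part of the upstream sample): the key repository is
+# typically initialized and rotated with the keystone-manage utility, e.g.
+#   keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
+#   keystone-manage fernet_rotate --keystone-user keystone --keystone-group keystone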
+
+
+[identity]
+
+#
+# From keystone
+#
+
+# This references the domain to use for all Identity API v2 requests (which are
+# not aware of domains). A domain with this ID will be created for you by
+# keystone-manage db_sync in migration 008. The domain referenced by this ID
+# cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API.
+# There is nothing special about this domain, other than the fact that it must
+# exist in order to maintain support for your v2 clients. (string value)
+#default_domain_id = default
+
+# A subset (or all) of domains can have their own identity driver, each with
+# their own partial configuration options, stored in either the resource
+# backend or in a file in a domain configuration directory (depending on the
+# setting of domain_configurations_from_database). Only values specific to the
+# domain need to be specified in this manner. This feature is disabled by
+# default; set to true to enable. (boolean value)
+#domain_specific_drivers_enabled = false
+
+# Extract the domain specific configuration options from the resource backend
+# where they have been stored with the domain data. This feature is disabled by
+# default (in which case the domain specific options will be loaded from files
+# in the domain configuration directory); set to true to enable. (boolean
+# value)
+#domain_configurations_from_database = false
+
+# Path for Keystone to locate the domain specific identity configuration files
+# if domain_specific_drivers_enabled is set to true. (string value)
+#domain_config_dir = /etc/keystone/domains
+
+# Entrypoint for the identity backend driver in the keystone.identity
+# namespace. Supplied drivers are ldap and sql. (string value)
+#driver = sql
+
+# Toggle for identity caching. This has no effect unless global caching is
+# enabled. (boolean value)
+#caching = true
+
+# Time to cache identity data (in seconds). This has no effect unless global
+# and identity caching are enabled. (integer value)
+#cache_time = 600
+
+# Maximum supported length for user passwords; decrease to improve performance.
+# (integer value)
+# Maximum value: 4096
+#max_password_length = 4096
+
+# Maximum number of entities that will be returned in an identity collection.
+# (integer value)
+#list_limit = <None>
+
+
+[identity_mapping]
+
+#
+# From keystone
+#
+
+# Entrypoint for the identity mapping backend driver in the
+# keystone.identity.id_mapping namespace. (string value)
+#driver = sql
+
+# Entrypoint for the public ID generator for user and group entities in the
+# keystone.identity.id_generator namespace. The Keystone identity mapper only
+# supports generators that produce no more than 64 characters. (string value)
+#generator = sha256
+
+# The format of user and group IDs changed in Juno for backends that do not
+# generate UUIDs (e.g. LDAP), with keystone providing a hash mapping to the
+# underlying attribute in LDAP. By default this mapping is disabled, which
+# ensures that existing IDs will not change. Even when the mapping is enabled
+# by using domain specific drivers, any users and groups from the default
+# domain being handled by LDAP will still not be mapped to ensure their IDs
+# remain backward compatible. Setting this value to False will enable the
+# mapping for even the default LDAP driver. It is only safe to do this if you
+# do not already have assignments for users and groups from the default LDAP
+# domain, and it is acceptable for Keystone to provide the different IDs to
+# clients than it did previously. Typically this means that the only time you
+# can set this value to False is when configuring a fresh installation.
+# (boolean value)
+#backward_compatible_ids = true
+
+
+[kvs]
+
+#
+# From keystone
+#
+
+# Extra dogpile.cache backend modules to register with the dogpile.cache
+# library. (list value)
+#backends =
+
+# Prefix for building the configuration dictionary for the KVS region. This
+# should not need to be changed unless there is another dogpile.cache region
+# with the same configuration name. (string value)
+#config_prefix = keystone.kvs
+
+# Toggle to disable using a key-mangling function to ensure fixed length keys.
+# This is toggle-able for debugging purposes; it is highly recommended to
+# always leave this set to true. (boolean value)
+#enable_key_mangler = true
+
+# Default lock timeout (in seconds) for distributed locking. (integer value)
+#default_lock_timeout = 5
+
+
+[ldap]
+
+#
+# From keystone
+#
+
+# URL(s) for connecting to the LDAP server. Multiple LDAP URLs may be specified
+# as a comma separated string. The first URL to successfully bind is used for
+# the connection. (string value)
+#url = ldap://localhost
+
+# User BindDN to query the LDAP server. (string value)
+#user = <None>
+
+# Password for the BindDN to query the LDAP server. (string value)
+#password = <None>
+
+# LDAP server suffix (string value)
+#suffix = cn=example,cn=com
+
+# If true, will add a dummy member to groups. This is required if the
+# objectclass for groups requires the "member" attribute. (boolean value)
+#use_dumb_member = false
+
+# DN of the "dummy member" to use when "use_dumb_member" is enabled. (string
+# value)
+#dumb_member = cn=dumb,dc=nonexistent
+
+# Delete subtrees using the subtree delete control. Only enable this option if
+# your LDAP server supports subtree deletion. (boolean value)
+#allow_subtree_delete = false
+
+# The LDAP scope for queries, "one" represents oneLevel/singleLevel and "sub"
+# represents subtree/wholeSubtree options. (string value)
+# Allowed values: one, sub
+#query_scope = one
+
+# Maximum results per page; a value of zero ("0") disables paging. (integer
+# value)
+#page_size = 0
+
+# The LDAP dereferencing option for queries. The "default" option falls back to
+# using default dereferencing configured by your ldap.conf. (string value)
+# Allowed values: never, searching, always, finding, default
+#alias_dereferencing = default
+
+# Sets the LDAP debugging level for LDAP calls. A value of 0 means that
+# debugging is not enabled. This value is a bitmask, consult your LDAP
+# documentation for possible values. (integer value)
+#debug_level = <None>
+
+# Override the system's default referral chasing behavior for queries. (boolean
+# value)
+#chase_referrals = <None>
+
+# Search base for users. Defaults to the suffix value. (string value)
+#user_tree_dn = <None>
+
+# LDAP search filter for users. (string value)
+#user_filter = <None>
+
+# LDAP objectclass for users. (string value)
+#user_objectclass = inetOrgPerson
+
+# LDAP attribute mapped to user id. WARNING: must not be a multivalued
+# attribute. (string value)
+#user_id_attribute = cn
+
+# LDAP attribute mapped to user name. (string value)
+#user_name_attribute = sn
+
+# LDAP attribute mapped to user description. (string value)
+#user_description_attribute = description
+
+# LDAP attribute mapped to user email. (string value)
+#user_mail_attribute = mail
+
+# LDAP attribute mapped to password. (string value)
+#user_pass_attribute = userPassword
+
+# LDAP attribute mapped to user enabled flag. (string value)
+#user_enabled_attribute = enabled
+
+# Invert the meaning of the boolean enabled values. Some LDAP servers use a
+# boolean lock attribute where "true" means an account is disabled. Setting
+# "user_enabled_invert = true" will allow these lock attributes to be used.
+# This setting will have no effect if "user_enabled_mask" or
+# "user_enabled_emulation" settings are in use. (boolean value)
+#user_enabled_invert = false
+
+# Bitmask integer to indicate the bit that the enabled value is stored in if
+# the LDAP server represents "enabled" as a bit on an integer rather than a
+# boolean. A value of "0" indicates the mask is not used. If this is not set to
+# "0" the typical value is "2". This is typically used when
+# "user_enabled_attribute = userAccountControl". (integer value)
+#user_enabled_mask = 0
+
+# Default value to enable users. This should match an appropriate int value if
+# the LDAP server uses non-boolean (bitmask) values to indicate if a user is
+# enabled or disabled. If this is not set to "True" the typical value is "512".
+# This is typically used when "user_enabled_attribute = userAccountControl".
+# (string value)
+#user_enabled_default = True
+
+# List of attributes stripped off the user on update. (list value)
+#user_attribute_ignore = default_project_id
+
+# LDAP attribute mapped to default_project_id for users. (string value)
+#user_default_project_id_attribute = <None>
+
+# Allow user creation in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
+#user_allow_create = true
+
+# Allow user updates in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
+#user_allow_update = true
+
+# Allow user deletion in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
+#user_allow_delete = true
+
+# If true, Keystone uses an alternative method to determine if a user is
+# enabled or not by checking if they are a member of the
+# "user_enabled_emulation_dn" group. (boolean value)
+#user_enabled_emulation = false
+
+# DN of the group entry to hold enabled users when using enabled emulation.
+# (string value)
+#user_enabled_emulation_dn = <None>
+
+# Use the "group_member_attribute" and "group_objectclass" settings to
+# determine membership in the emulated enabled group. (boolean value)
+#user_enabled_emulation_use_group_config = false
+
+# List of additional LDAP attributes used for additional attribute mappings
+# for users. Attribute mapping format is <ldap_attr>:<user_attr>,
+# where ldap_attr is the attribute in the LDAP entry and user_attr is the
+# Identity API attribute. (list value)
+#user_additional_attribute_mapping =
+
+# Search base for groups. Defaults to the suffix value. (string value)
+#group_tree_dn = <None>
+
+# LDAP search filter for groups. (string value)
+#group_filter = <None>
+
+# LDAP objectclass for groups. (string value)
+#group_objectclass = groupOfNames
+
+# LDAP attribute mapped to group id. (string value)
+#group_id_attribute = cn
+
+# LDAP attribute mapped to group name. (string value)
+#group_name_attribute = ou
+
+# LDAP attribute mapped to show group membership. (string value)
+#group_member_attribute = member
+
+# LDAP attribute mapped to group description. (string value)
+#group_desc_attribute = description
+
+# List of attributes stripped off the group on update. (list value)
+#group_attribute_ignore =
+
+# Allow group creation in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
+#group_allow_create = true
+
+# Allow group update in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
+#group_allow_update = true
+
+# Allow group deletion in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
+#group_allow_delete = true
+
+# Additional attribute mappings for groups. Attribute mapping format is
+# <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the LDAP entry
+# and user_attr is the Identity API attribute. (list value)
+#group_additional_attribute_mapping =
+
+# CA certificate file path for communicating with LDAP servers. (string value)
+#tls_cacertfile = <None>
+
+# CA certificate directory path for communicating with LDAP servers. (string
+# value)
+#tls_cacertdir = <None>
+
+# Enable TLS for communicating with LDAP servers. (boolean value)
+#use_tls = false
+
+# Specifies what checks to perform on client certificates in an incoming TLS
+# session. (string value)
+# Allowed values: demand, never, allow
+#tls_req_cert = demand
+
+# Enable LDAP connection pooling. (boolean value)
+#use_pool = true
+
+# Connection pool size. (integer value)
+#pool_size = 10
+
+# Maximum count of reconnect trials. (integer value)
+#pool_retry_max = 3
+
+# Time span in seconds to wait between two reconnect trials. (floating point
+# value)
+#pool_retry_delay = 0.1
+
+# Connector timeout in seconds. Value -1 indicates indefinite wait for
+# response. (integer value)
+#pool_connection_timeout = -1
+
+# Connection lifetime in seconds. (integer value)
+#pool_connection_lifetime = 600
+
+# Enable LDAP connection pooling for end user authentication. If use_pool is
+# disabled, then this setting is meaningless and is not used at all. (boolean
+# value)
+#use_auth_pool = true
+
+# End user auth connection pool size. (integer value)
+#auth_pool_size = 100
+
+# End user auth connection lifetime in seconds. (integer value)
+#auth_pool_connection_lifetime = 60
+
+# If the members of the group objectclass are user IDs rather than DNs, set
+# this to true. This is the case when using posixGroup as the group objectclass
+# and OpenDirectory. (boolean value)
+#group_members_are_ids = false
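+
+# Illustrative example (not part of the upstream sample; the server URL,
+# bind DN and suffix are assumptions): a minimal read-only LDAP identity
+# backend might be configured as:
+#   url = ldap://ldap.example.com
+#   user = cn=admin,dc=example,dc=org
+#   password = LDAP_PASS
+#   suffix = dc=example,dc=org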
+
+
+[matchmaker_redis]
+
+#
+# From oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host = 127.0.0.1
+
+# Use this port to connect to redis host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#port = 6379
+
+# Password for Redis server (optional). (string value)
+#password =
+
+# List of Redis Sentinel hosts (fault tolerance mode) e.g.
+# [host:port, host1:port ... ] (list value)
+#sentinel_hosts =
+
+# Redis replica set name. (string value)
+#sentinel_group_name = oslo-messaging-zeromq
+
+# Time in ms to wait between connection attempts. (integer value)
+#wait_timeout = 500
+
+# Time in ms to wait before the transaction is killed. (integer value)
+#check_timeout = 20000
+
+# Timeout in ms on blocking socket operations (integer value)
+#socket_timeout = 1000
+
+
+[memcache]
+
+#
+# From keystone
+#
+
+# Memcache servers in the format of "host:port". (list value)
+#servers = localhost:11211
+
+# Number of seconds memcached server is considered dead before it is tried
+# again. This is used by the key value store system (e.g. token pooled
+# memcached persistence backend). (integer value)
+#dead_retry = 300
+
+# Timeout in seconds for every call to a server. This is used by the key value
+# store system (e.g. token pooled memcached persistence backend). (integer
+# value)
+#socket_timeout = 3
+
+# Max total number of open connections to every memcached server. This is used
+# by the key value store system (e.g. token pooled memcached persistence
+# backend). (integer value)
+#pool_maxsize = 10
+
+# Number of seconds a connection to memcached is held unused in the pool before
+# it is closed. This is used by the key value store system (e.g. token pooled
+# memcached persistence backend). (integer value)
+#pool_unused_timeout = 60
+
+# Number of seconds that an operation will wait to get a memcache client
+# connection. This is used by the key value store system (e.g. token pooled
+# memcached persistence backend). (integer value)
+#pool_connection_get_timeout = 10
+
+
+[oauth1]
+
+#
+# From keystone
+#
+
+# Entrypoint for the OAuth backend driver in the keystone.oauth1 namespace.
+# (string value)
+#driver = sql
+
+# Duration (in seconds) for the OAuth Request Token. (integer value)
+#request_token_duration = 28800
+
+# Duration (in seconds) for the OAuth Access Token. (integer value)
+#access_token_duration = 86400
+
+
+[os_inherit]
+
+#
+# From keystone
+#
+
+# role-assignment inheritance to projects from owning domain or from projects
+# higher in the hierarchy can be optionally disabled. In the future, this
+# option will be removed and the hierarchy will be always enabled. (boolean
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: The option to enable the OS-INHERIT extension has been deprecated in
+# the M release and will be removed in the O release. The OS-INHERIT extension
+# will be enabled by default.
+#enabled = true
+
+
+[oslo_messaging_amqp]
+
+#
+# From oslo.messaging
+#
+
+# address prefix used when sending to a specific server (string value)
+# Deprecated group/name - [amqp1]/server_request_prefix
+#server_request_prefix = exclusive
+
+# address prefix used when broadcasting to all servers (string value)
+# Deprecated group/name - [amqp1]/broadcast_prefix
+#broadcast_prefix = broadcast
+
+# address prefix when sending to any server in group (string value)
+# Deprecated group/name - [amqp1]/group_request_prefix
+#group_request_prefix = unicast
+
+# Name for the AMQP container (string value)
+# Deprecated group/name - [amqp1]/container_name
+#container_name = <None>
+
+# Timeout for inactive connections (in seconds) (integer value)
+# Deprecated group/name - [amqp1]/idle_timeout
+#idle_timeout = 0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+# Deprecated group/name - [amqp1]/trace
+#trace = false
+
+# CA certificate PEM file to verify server certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_ca_file
+#ssl_ca_file =
+
+# Identifying certificate PEM file to present to clients (string value)
+# Deprecated group/name - [amqp1]/ssl_cert_file
+#ssl_cert_file =
+
+# Private key PEM file used to sign cert_file certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_key_file
+#ssl_key_file =
+
+# Password for decrypting ssl_key_file (if encrypted) (string value)
+# Deprecated group/name - [amqp1]/ssl_key_password
+#ssl_key_password = <None>
+
+# Accept clients using either SSL or plain TCP (boolean value)
+# Deprecated group/name - [amqp1]/allow_insecure_clients
+#allow_insecure_clients = false
+
+# Space separated list of acceptable SASL mechanisms (string value)
+# Deprecated group/name - [amqp1]/sasl_mechanisms
+#sasl_mechanisms =
+
+# Path to directory that contains the SASL configuration (string value)
+# Deprecated group/name - [amqp1]/sasl_config_dir
+#sasl_config_dir =
+
+# Name of configuration file (without .conf suffix) (string value)
+# Deprecated group/name - [amqp1]/sasl_config_name
+#sasl_config_name =
+
+# User name for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/username
+#username =
+
+# Password for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/password
+#password =
+
+
+[oslo_messaging_notifications]
+
+#
+# From oslo.messaging
+#
+
+# The Driver(s) to handle sending notifications. Possible values are
+# messaging, messagingv2, routing, log, test, noop (multi valued)
+# Deprecated group/name - [DEFAULT]/notification_driver
+#driver =
+
+# A URL representing the messaging driver to use for notifications. If not set,
+# we fall back to the same configuration used for RPC. (string value)
+# Deprecated group/name - [DEFAULT]/notification_transport_url
+#transport_url = <None>
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+# Deprecated group/name - [DEFAULT]/notification_topics
+#topics = notifications
+
+
+[oslo_messaging_rabbit]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_durable_queues
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_auto_delete
+#amqp_auto_delete = false
+
+# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
+# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
+# distributions. (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_version
+#kombu_ssl_version =
+
+# SSL key file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
+#kombu_ssl_keyfile =
+
+# SSL cert file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
+#kombu_ssl_certfile =
+
+# SSL certification authority file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
+#kombu_ssl_ca_certs =
+
+# How long to wait before reconnecting in response to an AMQP consumer cancel
+# notification. (floating point value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
+#kombu_reconnect_delay = 1.0
+
+# EXPERIMENTAL: Possible values are: gzip, bz2. If not set, compression will not
+# be used. This option may not be available in future versions. (string value)
+#kombu_compression = <None>
+
+# How long to wait for a missing client before abandoning sending it its replies.
+# This value should not be longer than rpc_response_timeout. (integer value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout
+#kombu_missing_consumer_retry_timeout = 60
+
+# Determines how the next RabbitMQ node is chosen in case the one we are
+# currently connected to becomes unavailable. Takes effect only if more than
+# one RabbitMQ node is provided in config. (string value)
+# Allowed values: round-robin, shuffle
+#kombu_failover_strategy = round-robin
+
+# The RabbitMQ broker address where a single node is used. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_host
+#rabbit_host = localhost
+
+# The RabbitMQ broker port where a single node is used. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# Deprecated group/name - [DEFAULT]/rabbit_port
+#rabbit_port = 5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+# Deprecated group/name - [DEFAULT]/rabbit_hosts
+#rabbit_hosts = $rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
+#rabbit_use_ssl = false
+
+# The RabbitMQ userid. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_userid
+#rabbit_userid = guest
+
+# The RabbitMQ password. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_password
+#rabbit_password = guest
+
+# The RabbitMQ login method. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_login_method
+#rabbit_login_method = AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
+#rabbit_virtual_host = /
+
+# How frequently to retry connecting with RabbitMQ. (integer value)
+#rabbit_retry_interval = 1
+
+# How long to backoff for between retries when connecting to RabbitMQ. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
+#rabbit_retry_backoff = 2
+
+# Maximum interval of RabbitMQ connection retries. Default is 30 seconds.
+# (integer value)
+#rabbit_interval_max = 30
+
+# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry
+# count). (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_max_retries
+#rabbit_max_retries = 0
+
+# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this
+# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring
+# is no longer controlled by the x-ha-policy argument when declaring a queue.
+# If you just want to make sure that all queues (except  those with auto-
+# generated names) are mirrored across all nodes, run: "rabbitmqctl set_policy
+# HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
+#rabbit_ha_queues = false
+
+# Positive integer representing duration in seconds for queue TTL (x-expires).
+# Queues which are unused for the duration of the TTL are automatically
+# deleted. The parameter affects only reply and fanout queues. (integer value)
+# Minimum value: 1
+#rabbit_transient_queues_ttl = 1800
+
+# Specifies the number of messages to prefetch. Setting to zero allows
+# unlimited messages. (integer value)
+#rabbit_qos_prefetch_count = 0
+
+# Number of seconds after which the Rabbit broker is considered down if
+# heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL (integer
+# value)
+#heartbeat_timeout_threshold = 60
+
+# How many times during the heartbeat_timeout_threshold we check the
+# heartbeat. (integer value)
+#heartbeat_rate = 2
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
+# Deprecated group/name - [DEFAULT]/fake_rabbit
+#fake_rabbit = false
+
+# Maximum number of channels to allow (integer value)
+#channel_max = <None>
+
+# The maximum byte size for an AMQP frame (integer value)
+#frame_max = <None>
+
+# How often to send heartbeats for consumer's connections (integer value)
+#heartbeat_interval = 1
+
+# Enable SSL (boolean value)
+#ssl = <None>
+
+# Arguments passed to ssl.wrap_socket (dict value)
+#ssl_options = <None>
+
+# Set socket timeout in seconds for connection's socket (floating point value)
+#socket_timeout = 0.25
+
+# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating point
+# value)
+#tcp_user_timeout = 0.25
+
+# Set delay for reconnection to some host which has connection error (floating
+# point value)
+#host_connection_reconnect_delay = 0.25
+
+# Maximum number of connections to keep queued. (integer value)
+#pool_max_size = 10
+
+# Maximum number of connections to create above `pool_max_size`. (integer
+# value)
+#pool_max_overflow = 0
+
+# Default number of seconds to wait for a connection to become available
+# (integer value)
+#pool_timeout = 30
+
+# Lifetime of a connection (since creation) in seconds or None for no
+# recycling. Expired connections are closed on acquire. (integer value)
+#pool_recycle = 600
+
+# Threshold at which inactive (since release) connections are considered stale
+# in seconds or None for no staleness. Stale connections are closed on acquire.
+# (integer value)
+#pool_stale = 60
+
+# Persist notification messages. (boolean value)
+#notification_persistence = false
+
+# Exchange name for sending notifications (string value)
+#default_notification_exchange = ${control_exchange}_notification
+
+# Max number of not acknowledged message which RabbitMQ can send to
+# notification listener. (integer value)
+#notification_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during sending
+# notification, -1 means infinite retry. (integer value)
+#default_notification_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending
+# notification message (floating point value)
+#notification_retry_delay = 0.25
+
+# Time to live for rpc queues without consumers in seconds. (integer value)
+#rpc_queue_expiration = 60
+
+# Exchange name for sending RPC messages (string value)
+#default_rpc_exchange = ${control_exchange}_rpc
+
+# Exchange name for receiving RPC replies (string value)
+#rpc_reply_exchange = ${control_exchange}_rpc_reply
+
+# Max number of not acknowledged message which RabbitMQ can send to rpc
+# listener. (integer value)
+#rpc_listener_prefetch_count = 100
+
+# Max number of not acknowledged message which RabbitMQ can send to rpc reply
+# listener. (integer value)
+#rpc_reply_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during sending
+# reply. -1 means infinite retry during rpc_timeout (integer value)
+#rpc_reply_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending
+# reply. (floating point value)
+#rpc_reply_retry_delay = 0.25
+
+# Reconnecting retry count in case of connectivity problem during sending RPC
+# message, -1 means infinite retry. If actual retry attempts is not 0, the rpc
+# request could be processed more than one time (integer value)
+#default_rpc_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending RPC
+# message (floating point value)
+#rpc_retry_delay = 0.25
+
+
+[oslo_middleware]
+
+#
+# From oslo.middleware
+#
+
+# The maximum body size for each  request, in bytes. (integer value)
+# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size
+# Deprecated group/name - [DEFAULT]/max_request_body_size
+#max_request_body_size = 114688
+
+# The HTTP Header that will be used to determine what the original request
+# protocol scheme was, even if it was hidden by an SSL termination proxy.
+# (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#secure_proxy_ssl_header = X-Forwarded-Proto
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# The JSON file that defines policies. (string value)
+# Deprecated group/name - [DEFAULT]/policy_file
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string value)
+# Deprecated group/name - [DEFAULT]/policy_default_rule
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be relative
+# to any directory in the search path defined by the config_dir option, or
+# absolute paths. The file defined by policy_file must exist for these
+# directories to be searched.  Missing or empty directories are ignored. (multi
+# valued)
+# Deprecated group/name - [DEFAULT]/policy_dirs
+#policy_dirs = policy.d
+
+
+[paste_deploy]
+
+#
+# From keystone
+#
+
+# Name of the paste configuration file that defines the available pipelines.
+# (string value)
+#config_file = keystone-paste.ini
+
+
+[policy]
+
+#
+# From keystone
+#
+
+# Entrypoint for the policy backend driver in the keystone.policy namespace.
+# Supplied drivers are rules and sql. (string value)
+#driver = sql
+
+# Maximum number of entities that will be returned in a policy collection.
+# (integer value)
+#list_limit = <None>
+
+
+[resource]
+
+#
+# From keystone
+#
+
+# Entrypoint for the resource backend driver in the keystone.resource
+# namespace. Only an SQL driver is supplied. If a resource driver is not
+# specified, the assignment driver will choose the resource driver. (string
+# value)
+#driver = <None>
+
+# Toggle for resource caching. This has no effect unless global caching is
+# enabled. (boolean value)
+# Deprecated group/name - [assignment]/caching
+#caching = true
+
+# TTL (in seconds) to cache resource data. This has no effect unless global
+# caching is enabled. (integer value)
+# Deprecated group/name - [assignment]/cache_time
+#cache_time = <None>
+
+# Maximum number of entities that will be returned in a resource collection.
+# (integer value)
+# Deprecated group/name - [assignment]/list_limit
+#list_limit = <None>
+
+# Name of the domain that owns the `admin_project_name`. Defaults to None.
+# (string value)
+#admin_project_domain_name = <None>
+
+# Special project for performing administrative operations on remote services.
+# Tokens scoped to this project will contain the key/value
+# `is_admin_project=true`. Defaults to None. (string value)
+#admin_project_name = <None>
+
+# Whether the names of projects are restricted from containing url reserved
+# characters. If set to new, attempts to create or update a project with a url
+# unsafe name will return an error. In addition, if set to strict, attempts to
+# scope a token using an unsafe project name will return an error. (string
+# value)
+# Allowed values: off, new, strict
+#project_name_url_safe = off
+
+# Whether the names of domains are restricted from containing url reserved
+# characters. If set to new, attempts to create or update a domain with a url
+# unsafe name will return an error. In addition, if set to strict, attempts to
+# scope a token using a domain name which is unsafe will return an error.
+# (string value)
+# Allowed values: off, new, strict
+#domain_name_url_safe = off
+
+
+[revoke]
+
+#
+# From keystone
+#
+
+# Entrypoint for an implementation of the backend for persisting revocation
+# events in the keystone.revoke namespace. Supplied drivers are kvs and sql.
+# (string value)
+#driver = sql
+
+# This value (calculated in seconds) is added to token expiration before a
+# revocation event may be removed from the backend. (integer value)
+#expiration_buffer = 1800
+
+# Toggle for revocation event caching. This has no effect unless global caching
+# is enabled. (boolean value)
+#caching = true
+
+# Time to cache the revocation list and the revocation events (in seconds).
+# This has no effect unless global and token caching are enabled. (integer
+# value)
+# Deprecated group/name - [token]/revocation_cache_time
+#cache_time = 3600
+
+
+[role]
+
+#
+# From keystone
+#
+
+# Entrypoint for the role backend driver in the keystone.role namespace.
+# Supplied drivers are ldap and sql. (string value)
+#driver = <None>
+
+# Toggle for role caching. This has no effect unless global caching is enabled.
+# (boolean value)
+#caching = true
+
+# TTL (in seconds) to cache role data. This has no effect unless global caching
+# is enabled. (integer value)
+#cache_time = <None>
+
+# Maximum number of entities that will be returned in a role collection.
+# (integer value)
+#list_limit = <None>
+
+
+[saml]
+
+#
+# From keystone
+#
+
+# Default TTL, in seconds, for any generated SAML assertion created by
+# Keystone. (integer value)
+#assertion_expiration_time = 3600
+
+# Binary to be called for XML signing. Install the appropriate package, specify
+# absolute path or adjust your PATH environment variable if the binary cannot
+# be found. (string value)
+#xmlsec1_binary = xmlsec1
+
+# Path of the certfile for SAML signing. For non-production environments, you
+# may be interested in using `keystone-manage pki_setup` to generate self-
+# signed certificates. Note, the path cannot contain a comma. (string value)
+#certfile = /etc/keystone/ssl/certs/signing_cert.pem
+
+# Path of the keyfile for SAML signing. Note, the path cannot contain a comma.
+# (string value)
+#keyfile = /etc/keystone/ssl/private/signing_key.pem
+
+# Entity ID value for unique Identity Provider identification. Usually FQDN is
+# set with a suffix. A value is required to generate IDP Metadata. For example:
+# https://keystone.example.com/v3/OS-FEDERATION/saml2/idp (string value)
+#idp_entity_id = <None>
+
+# Identity Provider Single-Sign-On service value, required in the Identity
+# Provider's metadata. A value is required to generate IDP Metadata. For
+# example: https://keystone.example.com/v3/OS-FEDERATION/saml2/sso (string
+# value)
+#idp_sso_endpoint = <None>
+
+# Language used by the organization. (string value)
+#idp_lang = en
+
+# Organization name the installation belongs to. (string value)
+#idp_organization_name = <None>
+
+# Organization name to be displayed. (string value)
+#idp_organization_display_name = <None>
+
+# URL of the organization. (string value)
+#idp_organization_url = <None>
+
+# Company of contact person. (string value)
+#idp_contact_company = <None>
+
+# Given name of contact person (string value)
+#idp_contact_name = <None>
+
+# Surname of contact person. (string value)
+#idp_contact_surname = <None>
+
+# Email address of contact person. (string value)
+#idp_contact_email = <None>
+
+# Telephone number of contact person. (string value)
+#idp_contact_telephone = <None>
+
+# The contact type describing the main point of contact for the identity
+# provider. (string value)
+# Allowed values: technical, support, administrative, billing, other
+#idp_contact_type = other
+
+# Path to the Identity Provider Metadata file. This file should be generated
+# with the keystone-manage saml_idp_metadata command. (string value)
+#idp_metadata_path = /etc/keystone/saml2_idp_metadata.xml
+
+# The prefix to use for the RelayState SAML attribute, used when generating ECP
+# wrapped assertions. (string value)
+#relay_state_prefix = ss:mem:
+
+
+[shadow_users]
+
+#
+# From keystone
+#
+
+# Entrypoint for the shadow users backend driver in the
+# keystone.identity.shadow_users namespace. (string value)
+#driver = sql
+
+
+[signing]
+
+#
+# From keystone
+#
+
+# Path of the certfile for token signing. For non-production environments, you
+# may be interested in using `keystone-manage pki_setup` to generate self-
+# signed certificates. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
+#certfile = /etc/keystone/ssl/certs/signing_cert.pem
+
+# Path of the keyfile for token signing. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
+#keyfile = /etc/keystone/ssl/private/signing_key.pem
+
+# Path of the CA for token signing. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
+#ca_certs = /etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA key for token signing. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
+#ca_key = /etc/keystone/ssl/private/cakey.pem
+
+# Key size (in bits) for token signing cert (auto generated certificate).
+# (integer value)
+# Minimum value: 1024
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
+#key_size = 2048
+
+# Days the token signing cert is valid for (auto generated certificate).
+# (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
+#valid_days = 3650
+
+# Certificate subject (auto generated certificate) for token signing. (string
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
+#cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com
+
+
+[ssl]
+
+#
+# From keystone
+#
+
+# Path of the CA key file for SSL. (string value)
+#ca_key = /etc/keystone/ssl/private/cakey.pem
+
+# SSL key length (in bits) (auto generated certificate). (integer value)
+# Minimum value: 1024
+#key_size = 1024
+
+# Days the certificate is valid for once signed (auto generated certificate).
+# (integer value)
+#valid_days = 3650
+
+# SSL certificate subject (auto generated certificate). (string value)
+#cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN=localhost
+
+
+[token]
+provider = fernet
+
+#
+# From keystone
+#
+
+# External auth mechanisms that should add bind information to token, e.g.,
+# kerberos,x509. (list value)
+#bind =
+
+# Enforcement policy on tokens presented to Keystone with bind information. One
+# of disabled, permissive, strict, required or a specifically required bind
+# mode, e.g., kerberos or x509 to require binding to that authentication.
+# (string value)
+#enforce_token_bind = permissive
+
+# Amount of time a token should remain valid (in seconds). (integer value)
+#expiration = 3600
+
+# Controls the token construction, validation, and revocation operations.
+# Entrypoint in the keystone.token.provider namespace. Core providers are
+# [fernet|pkiz|pki|uuid]. (string value)
+#provider = uuid
+
+# Entrypoint for the token persistence backend driver in the
+# keystone.token.persistence namespace. Supplied drivers are kvs, memcache,
+# memcache_pool, and sql. (string value)
+#driver = sql
+
+# Toggle for token system caching. This has no effect unless global caching is
+# enabled. (boolean value)
+#caching = true
+
+# Time to cache tokens (in seconds). This has no effect unless global and token
+# caching are enabled. (integer value)
+#cache_time = <None>
+
+# Revoke token by token identifier. Setting revoke_by_id to true enables
+# various forms of enumerating tokens, e.g. `list tokens for user`. These
+# enumerations are processed to determine the list of tokens to revoke. Only
+# disable if you are switching to using the Revoke extension with a backend
+# other than KVS, which stores events in memory. (boolean value)
+#revoke_by_id = true
+
+# Allow rescoping of scoped token. Setting allow_rescoped_scoped_token to false
+# prevents a user from exchanging a scoped token for any other token. (boolean
+# value)
+#allow_rescope_scoped_token = true
+
+# The hash algorithm to use for PKI tokens. This can be set to any algorithm
+# that hashlib supports. WARNING: Before changing this value, the auth_token
+# middleware must be configured with the hash_algorithms, otherwise token
+# revocation will not be processed correctly. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
+#hash_algorithm = md5
+
+# Add roles to token that are not explicitly added, but that are linked
+# implicitly to other roles. (boolean value)
+#infer_roles = true
+
+
+[tokenless_auth]
+
+#
+# From keystone
+#
+
+# The list of trusted issuers to further filter the certificates that are
+# allowed to participate in the X.509 tokenless authorization. If the option is
+# absent then no certificates will be allowed. The naming format for the
+# attributes of a Distinguished Name(DN) must be separated by a comma and
+# contain no spaces. This configuration option may be repeated for multiple
+# values. For example: trusted_issuer=CN=john,OU=keystone,O=openstack
+# trusted_issuer=CN=mary,OU=eng,O=abc (multi valued)
+#trusted_issuer =
+
+# The protocol name for the X.509 tokenless authorization along with the option
+# issuer_attribute below can look up its corresponding mapping. (string value)
+#protocol = x509
+
+# The issuer attribute that is served as an IdP ID for the X.509 tokenless
+# authorization along with the protocol to look up its corresponding mapping.
+# It is the environment variable in the WSGI environment that references to the
+# issuer of the client certificate. (string value)
+#issuer_attribute = SSL_CLIENT_I_DN
+
+
+[trust]
+
+#
+# From keystone
+#
+
+# Delegation and impersonation features can be optionally disabled. (boolean
+# value)
+#enabled = true
+
+# Enable redelegation feature. (boolean value)
+#allow_redelegation = false
+
+# Maximum depth of trust redelegation. (integer value)
+#max_redelegation_count = 3
+
+# Entrypoint for the trust backend driver in the keystone.trust namespace.
+# (string value)
+#driver = sql
+
+[extra_headers]
+Distribution = Ubuntu
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/keystone.override.j2 b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/keystone.override.j2
new file mode 100644
index 0000000..2905494
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/keystone.override.j2
@@ -0,0 +1 @@
+manual
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/mongodb.conf.j2 b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/mongodb.conf.j2
new file mode 100644
index 0000000..5feeba2
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/mongodb.conf.j2
@@ -0,0 +1,101 @@
+# mongodb.conf
+
+# Where to store the data.
+dbpath=/var/lib/mongodb
+
+#where to log
+logpath=/var/log/mongodb/mongodb.log
+
+logappend=true
+
+bind_ip = localhost
+#port = 27017
+
+# Enable journaling, http://www.mongodb.org/display/DOCS/Journaling
+journal=true
+smallfiles = true
+
+# Enables periodic logging of CPU utilization and I/O wait
+#cpu = true
+
+# Turn on/off security.  Off is currently the default
+#noauth = true
+#auth = true
+
+# Verbose logging output.
+#verbose = true
+
+# Inspect all client data for validity on receipt (useful for
+# developing drivers)
+#objcheck = true
+
+# Enable db quota management
+#quota = true
+
+# Set oplogging level where n is
+#   0=off (default)
+#   1=W
+#   2=R
+#   3=both
+#   7=W+some reads
+#oplog = 0
+
+# Diagnostic/debugging option
+#nocursors = true
+
+# Ignore query hints
+#nohints = true
+
+# Disable the HTTP interface (Defaults to localhost:27018).
+#nohttpinterface = true
+
+# Turns off server-side scripting.  This will result in greatly limited
+# functionality
+#noscripting = true
+
+# Turns off table scans.  Any query that would do a table scan fails.
+#notablescan = true
+
+# Disable data file preallocation.
+#noprealloc = true
+
+# Specify .ns file size for new databases.
+# nssize = <size>
+
+# Account token for Mongo monitoring server.
+#mms-token = <token>
+
+# Server name for Mongo monitoring server.
+#mms-name = <server-name>
+
+# Ping interval for Mongo monitoring server.
+#mms-interval = <seconds>
+
+# Replication Options
+
+# in replicated mongo databases, specify here whether this is a slave or master
+#slave = true
+#source = master.example.com
+# Slave only: specify a single database to replicate
+#only = master.example.com
+# or
+#master = true
+#source = slave.example.com
+
+# Address of a server to pair with.
+#pairwith = <server:port>
+# Address of arbiter server.
+#arbiter = <server:port>
+# Automatically resync if slave data is stale
+#autoresync
+# Custom size for replication operation log.
+#oplogSize = <MB>
+# Size limit for in-memory storage of op ids.
+#opIdMem = <bytes>
+
+# SSL options
+# Enable SSL on normal ports
+#sslOnNormalPorts = true
+# SSL Key file and password
+#sslPEMKeyFile = /etc/ssl/mongodb.pem
+#sslPEMKeyPassword = pass
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/openstack.cnf.j2 b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/openstack.cnf.j2
new file mode 100644
index 0000000..1aeec60
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/openstack.cnf.j2
@@ -0,0 +1,7 @@
+[mysqld] 
+bind-address = localhost 
+default-storage-engine = innodb 
+innodb_file_per_table 
+collation-server = utf8_general_ci 
+init-connect = 'SET NAMES utf8' 
+character-set-server = utf8
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/wsgi-keystone.conf.j2 b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/wsgi-keystone.conf.j2
new file mode 100644
index 0000000..e5698e2
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/mitaka-v3/templates/wsgi-keystone.conf.j2
@@ -0,0 +1,32 @@
+Listen 5000
+Listen 35357
+
+<VirtualHost *:5000>
+    WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
+    WSGIProcessGroup keystone-public
+    WSGIScriptAlias / /usr/bin/keystone-wsgi-public
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    ErrorLogFormat "%{cu}t %M"
+    ErrorLog /var/log/apache2/keystone.log
+    CustomLog /var/log/apache2/keystone_access.log combined
+
+    <Directory /usr/bin>
+        Require all granted
+    </Directory>
+</VirtualHost>
+
+<VirtualHost *:35357>
+    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
+    WSGIProcessGroup keystone-admin
+    WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    ErrorLogFormat "%{cu}t %M"
+    ErrorLog /var/log/apache2/keystone.log
+    CustomLog /var/log/apache2/keystone_access.log combined
+
+    <Directory /usr/bin>
+        Require all granted
+    </Directory>
+</VirtualHost>
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/startup-scripts/ceilometer_init_script.sh b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/startup-scripts/ceilometer_init_script.sh
new file mode 100755
index 0000000..df092fc
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/startup-scripts/ceilometer_init_script.sh
@@ -0,0 +1,8 @@
+STARTUP_PATH="/home/ubuntu/monitoring/xos/synchronizer/ceilometer/ceilometer_service_custom_image/startup-scripts"
+IP=$(ifconfig | awk '/inet addr/{print substr($2,6)}' | grep "10.0.3")
+#IP=$(ifconfig | awk '/inet addr/{print substr($2,6)}' | grep "10.11.10")
+echo $IP
+sudo rabbitmqctl add_user openstack "password"
+sudo rabbitmqctl set_permissions openstack ".*" ".*" ".*"
+python $STARTUP_PATH/update-endpoints.py --username root --password password --host localhost --endpoint $IP --endpoint-type public
+python $STARTUP_PATH/update-endpoints.py --username root --password password --host localhost --endpoint $IP --endpoint-type admin
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/startup-scripts/update-endpoints.py b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/startup-scripts/update-endpoints.py
new file mode 100755
index 0000000..fa8bf2b
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/startup-scripts/update-endpoints.py
@@ -0,0 +1,51 @@
+#!/usr/bin/python
+
+__author__      = 'Matt Fischer <matt.fischer@twcable.com>'
+__copyright__   = 'Copyright 2013, Matt Fischer'
+
+"""
+Update the endpoints in a keystone db using mysql
+"""
+
+import MySQLdb
+import argparse
+import urlparse
+import sys
+
+# a12ab673016d40da
+
+def main(dbhost, username, password, new_endpoint, endpoint_type):
+    db = MySQLdb.connect(host=dbhost, user=username, passwd=password,
+            db="keystone")
+    cur = db.cursor()
+    cur.execute("select id, url from endpoint where interface='%s'" % endpoint_type)
+    for row in cur.fetchall():
+        url = str(row[1])
+        endpoint_id = str(row[0])
+        try:
+            u = urlparse.urlparse(url)
+            print "Changing %s to %s in URL %s" % (u.hostname,new_endpoint, url)
+            urlstring = "%s://%s:%s%s" % (u.scheme, new_endpoint, u.port,
+                u.path)
+            cur.execute("""UPDATE endpoint
+                            SET url=%s
+                            WHERE id=%s
+                            """, (urlstring, endpoint_id))
+        except Exception as e:
+            print "Could not parse URL, giving up: %s (%s)" % (url, e)
+            cur.close()
+            db.close()
+            sys.exit(1)
+    db.commit()
+    cur.close()
+    db.close()
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--username", help="database username", required=True)
+    parser.add_argument("--password", help="database password", required=True)
+    parser.add_argument("--host", help="database host", required=True)
+    parser.add_argument("--endpoint", help="endpoint to move the public endpoints to", required=True)
+    parser.add_argument("--endpoint-type", help="which type of endpoint to modify", required=True, choices=['public','internal','admin'])
+    args = parser.parse_args()
+    main(args.host, args.username, args.password, args.endpoint, args.endpoint_type)
diff --git a/xos/synchronizer/ceilometer/ceilometer_service_custom_image/startup-scripts/zxceilostartup.sh b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/startup-scripts/zxceilostartup.sh
new file mode 100755
index 0000000..3bb04e7
--- /dev/null
+++ b/xos/synchronizer/ceilometer/ceilometer_service_custom_image/startup-scripts/zxceilostartup.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+sudo sed -i "s/.*127.0.0.1.*/127.0.0.1 localhost $(hostname)/" /etc/hosts
+STARTUP_PATH=/home/ubuntu/monitoring/xos/synchronizer/ceilometer/ceilometer_service_custom_image/startup-scripts
+PUB_SUB_PATH=/home/ubuntu/monitoring/xos/synchronizer/ceilometer/ceilometer_pub_sub
+$STARTUP_PATH/ceilometer_init_script.sh
+echo $PWD
+cd $PUB_SUB_PATH
+sleep 5
+chmod +x sub_main.py
+nohup ./sub_main.py &
+echo $PWD
+cd -
+echo $PWD
diff --git a/xos/synchronizer/ceilometer/pipeline_agent_module/README b/xos/synchronizer/ceilometer/pipeline_agent_module/README
new file mode 100644
index 0000000..a37ccc3
--- /dev/null
+++ b/xos/synchronizer/ceilometer/pipeline_agent_module/README
@@ -0,0 +1,41 @@
+Dynamic Pipeline-Agent Module:
+1.Packages :
+        pika
+        yaml
+        subprocess
+        logging
+        operator
+        json
+        ConfigParser
+
+ The pika package can be installed using the command:
+  -> pip install pika
+ The remaining packages come by default with the OS or can be installed
+ using the command:
+ -> sudo apt-get install <package-name>
+
+2.Files:
+a. utils.py: Consists of utility function for parsing and updating pipeline.yaml
+b. pipeline.yaml: Sample pipeline.yaml file with the minimum source and sink information tags.
+c. pipeline.py: Validates the pipeline.yaml configuration.
+d. pipeline_agent.py: Main file of the module, which listens on the RabbitMQ exchange "pubsub".
+e. pipeline_agent.conf: Conf file consisting of the following information:
+                       i. RabbitMQ server details (host, port, username, passwd)
+                       ii. LOGGING info (logging level, file name)
+                       iii. Ceilometer services to be restarted after pipeline.yaml changes.
+f. README
+
+3.To run the module:
+  ->sudo python pipeline_agent.py
+
+4.Format to send conf msg to the module:
+  i.For updating conf :
+    -> msg={"sub_info":sub_info,"target":target,"action":"ADD"}
+  ii.for deleting conf :
+    -> msg={"sub_info":sub_info,"target":target,"action":"DEL"}
+
+     The above two msgs should be in JSON format and should be sent to the same rabbitmq-server where pipeline_agent.py is running,
+     on the "pubsub" exchange (see the example publisher sketch below).
+     ex:
+        sub_info  = ["cpu_util", "memory"]
+        target = "kafka://1.2.3.2:18"
diff --git a/xos/synchronizer/ceilometer/pipeline_agent_module/pipeline.py b/xos/synchronizer/ceilometer/pipeline_agent_module/pipeline.py
new file mode 100644
index 0000000..17690f9
--- /dev/null
+++ b/xos/synchronizer/ceilometer/pipeline_agent_module/pipeline.py
@@ -0,0 +1,229 @@
+import abc
+import fnmatch
+import six
+import yaml
+import os
+import logging
+
+class PipelineException(Exception):
+    def __init__(self, message, pipeline_cfg):
+        self.msg = message
+        self.pipeline_cfg = pipeline_cfg
+
+    def __str__(self):
+        return 'Pipeline %s: %s' % (self.pipeline_cfg, self.msg)
+
+
+class Source(object):
+    """Represents a source of samples or events."""
+
+    def __init__(self, cfg):
+        self.cfg = cfg
+
+        try:
+            self.name = cfg['name']
+            self.sinks = cfg.get('sinks')
+        except KeyError as err:
+            raise PipelineException(
+                "Required field %s not specified" % err.args[0], cfg)
+
+    def __str__(self):
+        return self.name
+
+    def check_sinks(self, sinks):
+        if not self.sinks:
+            raise PipelineException(
+                "No sink defined in source %s" % self,
+                self.cfg)
+        for sink in self.sinks:
+            if sink not in sinks:
+                raise PipelineException(
+                    "Dangling sink %s from source %s" % (sink, self),
+                    self.cfg)
+    def check_source_filtering(self, data, d_type):
+        """Source data rules checking
+
+        - At least one meaningful datapoint exist
+        - Included type and excluded type can't co-exist on the same pipeline
+        - Included type meter and wildcard can't co-exist at same pipeline
+        """
+        if not data:
+            raise PipelineException('No %s specified' % d_type, self.cfg)
+
+        if ([x for x in data if x[0] not in '!*'] and
+           [x for x in data if x[0] == '!']):
+            raise PipelineException(
+                'Both included and excluded %s specified' % d_type,
+                self.cfg)
+
+        if '*' in data and [x for x in data if x[0] not in '!*']:
+            raise PipelineException(
+                'Included %s specified with wildcard' % d_type,
+                self.cfg)
+
+    @staticmethod
+    def is_supported(dataset, data_name):
+        # Support wildcard like storage.* and !disk.*
+        # Start with negation, we consider that the order is deny, allow
+        if any(fnmatch.fnmatch(data_name, datapoint[1:])
+               for datapoint in dataset if datapoint[0] == '!'):
+            return False
+
+        if any(fnmatch.fnmatch(data_name, datapoint)
+               for datapoint in dataset if datapoint[0] != '!'):
+            return True
+
+        # if we only have negation, we suppose the default is allow
+        return all(datapoint.startswith('!') for datapoint in dataset)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Pipeline(object):
+    """Represents a coupling between a sink and a corresponding source."""
+
+    def __init__(self, source, sink):
+        self.source = source
+        self.sink = sink
+        self.name = str(self)
+
+    def __str__(self):
+        return (self.source.name if self.source.name == self.sink.name
+                else '%s:%s' % (self.source.name, self.sink.name))
+
+
+class SamplePipeline(Pipeline):
+    """Represents a pipeline for Samples."""
+
+    def get_interval(self):
+        return self.source.interval
+
+class SampleSource(Source):
+    """Represents a source of samples.
+
+    In effect it is a set of pollsters and/or notification handlers emitting
+    samples for a set of matching meters. Each source encapsulates meter name
+    matching, polling interval determination, optional resource enumeration or
+    discovery, and mapping to one or more sinks for publication.
+    """
+
+    def __init__(self, cfg):
+        super(SampleSource, self).__init__(cfg)
+        try:
+            try:
+                self.interval = int(cfg['interval'])
+            except ValueError:
+                raise PipelineException("Invalid interval value", cfg)
+            # Support 'counters' for backward compatibility
+            self.meters = cfg.get('meters', cfg.get('counters'))
+        except KeyError as err:
+            raise PipelineException(
+                "Required field %s not specified" % err.args[0], cfg)
+        if self.interval <= 0:
+            raise PipelineException("Interval value should > 0", cfg)
+
+        self.resources = cfg.get('resources') or []
+        if not isinstance(self.resources, list):
+            raise PipelineException("Resources should be a list", cfg)
+
+        self.discovery = cfg.get('discovery') or []
+        if not isinstance(self.discovery, list):
+            raise PipelineException("Discovery should be a list", cfg)
+        self.check_source_filtering(self.meters, 'meters')
+
+    def support_meter(self, meter_name):
+        return self.is_supported(self.meters, meter_name)
+
+
+class Sink(object):
+
+    def __init__(self, cfg, transformer_manager):
+        self.cfg = cfg
+
+        try:
+            self.name = cfg['name']
+            # It's legal to have no transformer specified
+            self.transformer_cfg = cfg.get('transformers') or []
+        except KeyError as err:
+            raise PipelineException(
+                "Required field %s not specified" % err.args[0], cfg)
+
+        if not cfg.get('publishers'):
+            raise PipelineException("No publisher specified", cfg)
+
+      
+
+class SampleSink(Sink):
+    def Testfun(self):
+        pass
+      
+
+SAMPLE_TYPE = {'pipeline': SamplePipeline,
+               'source': SampleSource,
+               'sink': SampleSink}
+
+
+class PipelineManager(object):
+
+     def __init__(self, cfg, transformer_manager, p_type=SAMPLE_TYPE):
+        self.pipelines = []
+        if 'sources' in cfg or 'sinks' in cfg:
+            if not ('sources' in cfg and 'sinks' in cfg):
+                raise PipelineException("Both sources & sinks are required",
+                                        cfg)
+            #LOG.info(_('detected decoupled pipeline config format'))
+            logging.info("detected decoupled pipeline config format %s",cfg)
+            sources = [p_type['source'](s) for s in cfg.get('sources', [])]
+            sinks = {}
+            for s in cfg.get('sinks', []):
+                if s['name'] in sinks:
+                    raise PipelineException("Duplicated sink names: %s" %
+                                            s['name'], self)
+                else:
+                    sinks[s['name']] = p_type['sink'](s, transformer_manager)
+            for source in sources:
+                source.check_sinks(sinks)
+                for target in source.sinks:
+                    pipe = p_type['pipeline'](source, sinks[target])
+                    if pipe.name in [p.name for p in self.pipelines]:
+                        raise PipelineException(
+                            "Duplicate pipeline name: %s. Ensure pipeline"
+                            " names are unique. (name is the source and sink"
+                            " names combined)" % pipe.name, cfg)
+                    else:
+                        self.pipelines.append(pipe)
+        else:
+            #LOG.warning(_('detected deprecated pipeline config format'))
+            logging.warning("detected deprecated pipeline config format")
+            for pipedef in cfg:
+                source = p_type['source'](pipedef)
+                sink = p_type['sink'](pipedef, transformer_manager)
+                pipe = p_type['pipeline'](source, sink)
+                if pipe.name in [p.name for p in self.pipelines]:
+                    raise PipelineException(
+                        "Duplicate pipeline name: %s. Ensure pipeline"
+                        " names are unique" % pipe.name, cfg)
+                else:
+                    self.pipelines.append(pipe)
+     
+
+def _setup_pipeline_manager(cfg_file, transformer_manager, p_type=SAMPLE_TYPE):
+    if not os.path.exists(cfg_file):
+        #cfg_file = cfg.CONF.find_file(cfg_file)
+        print "File doesn't exists"   
+        return False
+
+    ##LOG.debug(_("Pipeline config file: %s"), cfg_file)
+    logging.debug("Pipeline config file: %s", cfg_file)
+
+    with open(cfg_file) as fap:
+        data = fap.read()
+
+    pipeline_cfg = yaml.safe_load(data)
+     
+    ##LOG.info(_("Pipeline config: %s"), pipeline_cfg)
+    logging.info("Pipeline config: %s", pipeline_cfg)
+    logging.debug("Pipeline config: %s", pipeline_cfg)
+      
+    return PipelineManager(pipeline_cfg,
+                           None, p_type)
+    
+
diff --git a/xos/synchronizer/ceilometer/pipeline_agent_module/pipeline.yaml b/xos/synchronizer/ceilometer/pipeline_agent_module/pipeline.yaml
new file mode 100644
index 0000000..505471a
--- /dev/null
+++ b/xos/synchronizer/ceilometer/pipeline_agent_module/pipeline.yaml
@@ -0,0 +1,13 @@
+---
+sources:
+    - name: meter_source
+      interval: 600
+      meters:
+          - "*"
+      sinks:
+          - meter_sink
+sinks:
+    - name: meter_sink
+      transformers:
+      publishers:
+          - notifier://
diff --git a/xos/synchronizer/ceilometer/pipeline_agent_module/pipeline_agent.conf b/xos/synchronizer/ceilometer/pipeline_agent_module/pipeline_agent.conf
new file mode 100644
index 0000000..9d811f6
--- /dev/null
+++ b/xos/synchronizer/ceilometer/pipeline_agent_module/pipeline_agent.conf
@@ -0,0 +1,32 @@
+#[LOGGING]
+#level = DEBUG 
+#filename = pipeline_agent.log
+
+[RABBITMQ]
+Rabbitmq_username = openstack
+Rabbitmq_passwd = 4815196be370811224fe
+Rabbitmq_host = 10.11.10.1
+Rabbitmq_port = 5672
+Ceilometer_service = ceilometer-agent-central,ceilometer-alarm-evaluator,ceilometer-api 
+
+[loggers]
+keys=root
+
+[handlers]
+keys=logfile
+
+[formatters]
+keys=logfileformatter
+
+[logger_root]
+level=INFO
+handlers=logfile
+
+[formatter_logfileformatter]
+format='%(asctime)s %(filename)s %(levelname)s %(message)s'
+
+[handler_logfile]
+class=handlers.RotatingFileHandler
+level=NOTSET
+args=('pipeline_agent.log','a',1000000,100)
+formatter=logfileformatter
diff --git a/xos/synchronizer/ceilometer/pipeline_agent_module/pipeline_agent.log b/xos/synchronizer/ceilometer/pipeline_agent_module/pipeline_agent.log
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/xos/synchronizer/ceilometer/pipeline_agent_module/pipeline_agent.log
diff --git a/xos/synchronizer/ceilometer/pipeline_agent_module/pipeline_agent.py b/xos/synchronizer/ceilometer/pipeline_agent_module/pipeline_agent.py
new file mode 100644
index 0000000..a751b8e
--- /dev/null
+++ b/xos/synchronizer/ceilometer/pipeline_agent_module/pipeline_agent.py
@@ -0,0 +1,208 @@
+import pika
+import yaml
+import subprocess
+import logging
+import logging.config
+import operator
+import json
+import ConfigParser
+import pipeline
+import utils
+#from ceilometer import pipeline
+from collections import  OrderedDict
+
+
+class UnsortableList(list):
+    def sort(self, *args, **kwargs):
+        pass
+
+class UnsortableOrderedDict(OrderedDict):
+    def items(self, *args, **kwargs):
+        return UnsortableList(OrderedDict.items(self, *args, **kwargs))
+
+#yaml.add_representer(UnsortableOrderedDict, yaml.representer.SafeRepresenter.represent_dict)
+
+
+tmp_pipeline_conf = "/tmp/pipeline.yaml"
+
+'''
+LEVELS = {'DEBUG': logging.DEBUG,
+          'INFO': logging.INFO,
+          'WARNING': logging.WARNING,
+          'ERROR': logging.ERROR,
+          'CRITICAL': logging.CRITICAL}
+
+_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+'''
+def get_source_info(meter):
+    sink_name = meter + "_sink"
+    meter_name = meter+"_name"   
+    source_info = {'interval': 6,'meters': [meter],'name': meter_name,'sinks':[sink_name]}
+    logging.debug("* new source_info :%s",source_info)
+    return (source_info,sink_name)
+
+def get_sink_info(meter,sink_name,target):
+    sink_info = {'publishers':['notifier://',target],'transformers':None ,'name': sink_name}
+    logging.debug("* new source_info :%s",sink_info)
+    return sink_info
+
+def restart_ceilometer_services():
+    try : 
+       config = ConfigParser.ConfigParser()
+       config.read('pipeline_agent.conf')
+       services = config.get('RABBITMQ','Ceilometer_service')
+       service = services.split(",")
+    except Exception as e:
+        logging.error("* Error in confing file:%s",e.__str__())
+        return False
+    else :
+        for service_name in service:
+            command = ['service',service_name, 'restart'];
+            logging.debug("Executing: %s command",command)
+            #shell=FALSE for sudo to work.
+            try :
+                subprocess.call(command, shell=False)
+            except Exception as e:
+                logging.error("* %s command execution failed with error %s",command,e.__str__())
+                return False
+    return True 
+   
+def check_meter_with_pipeline_cfg(pipeline_cfg_file,meter=None,target=None):
+    #import pdb;pdb.set_trace() 
+    try :
+        pipeline._setup_pipeline_manager(pipeline_cfg_file,None)
+    except Exception as e:
+        logging.error ("Got Exception: %s",e.__str__())
+        return False 
+    return True
+   
+
+def callback(ch, method, properties, msg):
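+    # Expected message body (JSON), as described in this module's README:
+    #   {"sub_info": <meter name or list of meter names>,
+    #    "target": <publisher URL, e.g. "kafka://1.2.3.2:18">,
+    #    "action": "ADD" | "DEL"}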
+    logging.debug(" [x] Received %r",msg)
+    #import pdb; pdb.set_trace()
+    #yaml.add_representer(UnsortableOrderedDict, yaml.representer.SafeRepresenter.represent_dict)
+    orig_pipeline_conf = "/etc/ceilometer/pipeline.yaml"
+    with open (orig_pipeline_conf, 'r') as fap:
+         data = fap.read()
+         pipeline_cfg = yaml.safe_load(data)
+    logging.debug("Pipeline config: %s", pipeline_cfg)
+
+    try : 
+        json_msg = json.loads(msg)
+        meter = json_msg['sub_info']
+        publisher = json_msg['target']
+        flag = json_msg['action']
+        update_status = []  
+        if type(meter) is list:
+            logging.debug("Metere is a list ... Need to handle it ")
+            for meter_info in meter :
+                update_status.append(update_pipeline_yaml(meter_info,publisher,flag))
+        else :
+             update_status.append(update_pipeline_yaml(meter,publisher,flag))
+ 
+        if reduce(operator.or_,  update_status):
+            if not restart_ceilometer_services():
+                logging.error("Error in restarting ceilometer services")
+                return False
+    except Exception as e :
+        logging.error("Got exception:%s in parsing message",e.__str__())
+        return False
+
+   
+
+
+ 
+def update_pipeline_yaml(meter,publisher,flag):
+    logging.debug("meter name:%s",meter)
+    logging.debug("publisher or target name:%s",publisher)
+
+    orig_pipeline_conf = "/etc/ceilometer/pipeline.yaml"
+    ''' Parsing original pipeline yaml file '''
+    try :
+         with open (orig_pipeline_conf, 'r') as fap:
+             data = fap.read()
+             pipeline_cfg = yaml.safe_load(data)
+         logging.debug("Pipeline config: %s", pipeline_cfg)
+   
+         ''' Checking parsing errors '''
+    
+         if not check_meter_with_pipeline_cfg(orig_pipeline_conf) :
+             logging.error("Original pipeline.yaml parsing failed")
+             return False
+         else :
+             status = None
+             if flag == "ADD" :
+                 status = utils.update_conf_to_pipe_line_cfg(meter,publisher,pipeline_cfg)
+             elif flag == "DEL" :
+                 status = utils.delete_conf_from_pipe_line_cfg(meter,publisher,pipeline_cfg)
+       
+             if status == True : 
+                 tmp_pipeline_conf = "/tmp/pipeline.yaml"
+                 with open(tmp_pipeline_conf, "w") as f:
+                      yaml.safe_dump( pipeline_cfg, f ,default_flow_style=False)
+                 if check_meter_with_pipeline_cfg(tmp_pipeline_conf,meter,publisher) :
+                      logging.debug("Tmp pipeline.yaml parsed sucessfully,coping it as orig")
+                      with open(orig_pipeline_conf, "w") as f:
+                          yaml.safe_dump( pipeline_cfg, f ,default_flow_style=False)
+                      return True
+                 else :
+                      logging.info("Retaining original conf,as update meter info has errors")
+                      return False     
+    except Exception as e:
+        logging.error("* Error in confing file:%s",e.__str__())
+        return False
+
+ 
+def msg_queue_listner():
+    
+    try:
+        config = ConfigParser.ConfigParser()
+        config.read('pipeline_agent.conf')
+        rabbitmq_username = config.get('RABBITMQ','Rabbitmq_username')
+        rabbitmq_passwd = config.get('RABBITMQ','Rabbitmq_passwd')
+        rabbitmq_host = config.get('RABBITMQ','Rabbitmq_host')
+        rabbitmq_port = int ( config.get('RABBITMQ','Rabbitmq_port') )
+        '''
+        log_level    = config.get('LOGGING','level')
+        log_file       = config.get('LOGGING','filename')
+ 
+        level = LEVELS.get(log_level, logging.NOTSET)
+        logging.basicConfig(filename=log_file,format='%(asctime)s %(filename)s %(levelname)s %(message)s',\
+                    datefmt=_DEFAULT_LOG_DATE_FORMAT,level=level)
+        '''
+        logging.config.fileConfig('pipeline_agent.conf', disable_existing_loggers=False)  
+    except Exception as e:
+        logging.error("* Error in confing file:%s",e.__str__())
+    else :
+        logging.debug("*------------------Rabbit MQ Server Info---------")
+        logging.debug("rabbitmq_username:%s",rabbitmq_username)
+        logging.debug("rabbitmq_passwd:%s",rabbitmq_passwd)
+        logging.debug("rabbitmq_host:%s",rabbitmq_host)
+        logging.debug("rabbitmq_port:%s",rabbitmq_port)
+        credentials = pika.PlainCredentials(rabbitmq_username,rabbitmq_passwd)
+        parameters = pika.ConnectionParameters(rabbitmq_host,
+                                               rabbitmq_port,
+                                               '/',
+                                               credentials)
+        connection = pika.BlockingConnection(parameters)
+        channel = connection.channel()
+        #channel.queue_declare(queue='pubsub')
+        channel.exchange_declare(exchange='pubsub',
+                         type='fanout')
+
+        result = channel.queue_declare(exclusive=True)
+        queue_name = result.method.queue
+
+        channel.queue_bind(exchange='pubsub',
+                    queue=queue_name)
+        logging.debug("[*] Waiting for messages. To exit press CTRL+C")
+
+        channel.basic_consume(callback,
+                              queue=queue_name,
+                              no_ack=True)
+        channel.start_consuming()
+
+if __name__ == "__main__":
+    #logging.debug("* Starting pipeline agent module")
+    msg_queue_listener()
+
diff --git a/xos/synchronizer/ceilometer/pipeline_agent_module/utils.py b/xos/synchronizer/ceilometer/pipeline_agent_module/utils.py
new file mode 100644
index 0000000..3aa4865
--- /dev/null
+++ b/xos/synchronizer/ceilometer/pipeline_agent_module/utils.py
@@ -0,0 +1,404 @@
+import yaml
+import random
+import string
+import logging
+import fnmatch
+
+def main():
+    orig_pipeline_conf = "/etc/ceilometer/pipeline.yaml"
+    with open (orig_pipeline_conf, 'r') as fap:
+        data = fap.read()
+        pipeline_cfg = yaml.safe_load(data)
+    return pipeline_cfg
+
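+# The helpers below all operate on the parsed pipeline.yaml structure, which is
+# expected to look roughly like this (values are illustrative only):
+#
+#   sources:
+#       - name: meter_source
+#         interval: 600
+#         meters:
+#             - "*"
+#         sinks:
+#             - meter_sink
+#   sinks:
+#       - name: meter_sink
+#         transformers:
+#         publishers:
+#             - notifier://
+#
+# Each source lists its meters and the sinks it feeds; each sink lists its publishers.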
+def build_meter_list():
+    ''' function to build the existing meter list from pipeline.yaml '''
+    orig_pipeline_conf = "/etc/ceilometer/pipeline.yaml"
+    with open (orig_pipeline_conf, 'r') as fap:
+         data = fap.read()
+         pipeline_cfg = yaml.safe_load(data)
+    source_cfg = pipeline_cfg['sources']
+    meter_list=[]
+    for i in source_cfg:
+         meter_list.append(i['meters'])
+    
+    return meter_list
+
+def get_sink_name_from_publisher(publisher,pipeline_cfg):
+    sink_cfg = pipeline_cfg['sinks']
+    ''' Iterating over the list of publishers to get sink name'''
+    try :
+        for sinks in sink_cfg:
+            pub_list = sinks.get('publishers')
+            try :
+                k = pub_list.index(publisher)
+                return sinks.get('name') 
+            except Exception as e:
+                #print ("Got Exception",e.__str__())
+                continue   
+    except Exception as e:
+        return None
+
+def get_source_name_from_meter(meter,pipeline_cfg):
+    source_cfg = pipeline_cfg['sources']
+    ''' Iterating over the list of meters to get source name'''
+    try :  
+        for sources in source_cfg:
+            meter_list = sources.get('meters')
+            try :
+                k = meter_list.index(meter)
+                return sources.get('name')
+            except Exception as e:
+                #print ("Got Exception",e.__str__())
+                continue   
+    except Exception as e:
+        return None
+
+def get_source_name_from_with_meter_patter_match(meter,pipeline_cfg):
+    ''' Iterating over the list of meters for a wildcard match to get the source name'''
+    source_cfg = pipeline_cfg['sources']
+    try :
+        for sources in source_cfg:
+            meter_list = sources.get('meters')
+            for k in meter_list:
+                if k[0] == "*":
+                    logging.warning("Ignoring wild card meter(*) case ")
+                    continue
+                if fnmatch.fnmatch(k,meter):
+                    logging.debug("substring match")
+                    return (sources.get('name'),"superset",k)
+                if fnmatch.fnmatch(meter,k):
+                    logging.debug("input is super match")
+                    return (sources.get('name'),"subset",k)
+    except Exception as e:
+        return None,None,None
+
+    return None,None,None
+
+def get_source_name_from_sink_name(sink_name,pipeline_cfg):
+    ''' iterating over the list of sources to get the source name for the given sink'''
+    source_cfg = pipeline_cfg['sources']
+    try :
+        for sources in source_cfg:
+            sink_list = sources.get("sinks")
+            try :
+                k = sink_list.index(sink_name)
+            #sources.get("meters").append("m2")
+                return sources.get("name")
+            except Exception as e:
+                continue
+    except Exception as e:
+        return None
+
+def get_sink_name_from_source_name(source_name,pipeline_cfg):
+    ''' iterating over the list of sources to get the sink list for the given source'''
+    source_cfg = pipeline_cfg['sources']
+    try :
+        for sources in source_cfg:
+            try :
+                if  sources.get("name") == source_name: 
+                    return sources.get("sinks")
+            except Exception as e:
+                continue
+    except Exception as e:
+        return None 
+
+def add_meter_to_source(meter_name,source_name,pipeline_cfg):
+    ''' iterating over the list of sources to add meter to the matching source'''
+    source_cfg = pipeline_cfg['sources']
+    try :
+        for sources in source_cfg:
+            try :
+                if  sources.get("name") == source_name: 
+                    sources.get("meters").append(meter_name)
+                    return True 
+            except Exception as e:
+                continue
+    except Exception as e:
+        return False
+
+def get_meter_list_from_source(source_name,pipeline_cfg):
+    ''' iterating over the list of sources to get meters under the given source'''
+    source_cfg = pipeline_cfg['sources']
+    try :
+        for sources in source_cfg:
+            try :
+                if  sources.get("name") == source_name:
+                    return sources.get("meters")
+            except Exception as e:
+                continue
+    except Exception as e:
+        return None
+
+def get_publisher_list_from_sink(sink_name,pipeline_cfg):
+    sink_cfg = pipeline_cfg['sinks']
+    ''' Iterating over the list of sinks to build publishers list '''
+    publisher_list = []
+    try :
+        for sinks in sink_cfg:
+            try :
+                for j in sink_name:
+                    if j == sinks.get("name"):
+                        publisher_list.append(sinks.get("publishers"))
+                        return publisher_list
+            except Exception as e:
+                #print ("Got Exception",e.__str__())
+                continue   
+    except Exception as e:
+        return None
+
+def get_publisher_list_from_sinkname(sink_name,pipeline_cfg):
+    sink_cfg = pipeline_cfg['sinks']
+    ''' Iterating over the list of sinks to build publishers list '''
+    try :
+        for sinks in sink_cfg:
+            pub_list = sinks.get('publishers')
+            try :
+                 if sink_name == sinks.get("name"):
+                     return pub_list   
+            except Exception as e:
+                #print ("Got Exception",e.__str__())
+                continue
+    except Exception as e:
+        return None
+
+
+def delete_meter_from_source(meter_name,source_name,pipeline_cfg) :
+    ''' function to delete a meter from the given source '''
+    source_cfg = pipeline_cfg['sources']
+    try :
+        for sources in source_cfg:
+            try :
+                if  sources.get("name") == source_name:
+                    meter_list = sources.get('meters')
+                    try :
+                       meter_index = meter_list.index(meter_name)
+                       logging.debug("meter name is present at index:%s",meter_index)
+                       if len(meter_list) == 1 and meter_index == 0:
+                           logging.debug("Only one meter exists removing entire source entry")
+                           source_cfg.remove(sources)
+                       else :
+                           meter_list.pop(meter_index)
+                       return True     
+                    except Exception as e:
+                        continue
+            except Exception as e:
+                continue
+    except Exception as e:
+        return False 
+
+def delete_publisher_from_sink(publisher,sink_name,pipeline_cfg):
+    sink_cfg = pipeline_cfg['sinks']
+    ''' Iterating over the list of publishers '''
+    try :
+        for sinks in sink_cfg:
+            pub_list = sinks.get('publishers')
+            #print pub_list
+            try :
+                if sink_name == sinks.get("name"):
+                    k = pub_list.index(publisher)
+                    pub_list.pop(k)
+                    #print k
+                    return True 
+            except Exception as e:
+                #print ("Got Exception",e.__str__())
+                continue   
+    except Exception as e:
+        return None
+
+def delete_sink_from_pipeline(sink_name,pipeline_cfg):
+    sink_cfg = pipeline_cfg['sinks']
+    try :
+        for sinks in sink_cfg:
+            if sink_name == sinks.get("name"):
+                sink_cfg.remove(sinks)
+                return True
+    except Exception as e:
+        return False 
+
+def add_publisher_to_sink(publisher_name,sink_name,pipeline_cfg):
+    sink_cfg = pipeline_cfg['sinks']
+    try :
+        for sinks in sink_cfg:
+            if sink_name == sinks.get("name"):
+                sinks.get('publishers').append(publisher_name)
+                return True
+    except Exception as e:
+        return None 
+
+def get_source_info(meter):
+    ''' builds a new randomly named source entry (and its sink name) for the given meter '''
+    name = ''.join(random.choice(string.ascii_lowercase) for _ in range(9))
+    sink_name = name + "_sink"
+    source_name = name + "_source"
+    source_info = {'interval': 6,'meters': [meter],'name': source_name,'sinks':[sink_name]}
+    logging.debug("* new source_info :%s",source_info)
+    return (source_info,sink_name)
+
+def get_sink_info(meter,sink_name,target):
+    ''' builds a new sink entry that publishes to notifier:// and the given target '''
+    sink_info = {'publishers':['notifier://',target],'transformers':None ,'name': sink_name}
+    logging.debug("* new sink_info :%s",sink_info)
+    return sink_info
+
+def delete_conf_from_pipe_line_cfg(meter,publisher,pipeline_cfg):
+    #import pdb;pdb.set_trace()
+   
+    sink_name = get_sink_name_from_publisher(publisher,pipeline_cfg)
+    source_name = get_source_name_from_meter(meter,pipeline_cfg)
+      
+    if sink_name is None or source_name is None:
+       logging.error("Either sink or source name Exists in the pipeline.yaml")
+       return False
+   
+    meter_list = get_meter_list_from_source(source_name,pipeline_cfg)
+   
+    temp_meter_list = []
+   
+    for j in meter_list:
+        temp_meter_list.append(j)
+  
+    pub_list = get_publisher_list_from_sinkname(sink_name,pipeline_cfg)
+    if len(pub_list) > 2 and  len(temp_meter_list) == 1:
+        if delete_publisher_from_sink(publisher,sink_name,pipeline_cfg):
+            return True
+        else:
+            return False    
+  
+    if delete_meter_from_source(meter,source_name,pipeline_cfg) :
+        if len(temp_meter_list) == 1:
+            if delete_publisher_from_sink(publisher,sink_name,pipeline_cfg) :
+                if get_source_name_from_sink_name(sink_name,pipeline_cfg) is None:
+                    delete_sink_from_pipeline(sink_name,pipeline_cfg)  
+                return True
+            else :
+                return False 
+        return True         
+    return False
+    
+
+def update_sink_aggrgation(meter,publisher,source_name,matching_meter,meter_match,pipeline_cfg):
+    ''' Build new source and sink '''
+    new_source_info,new_sink_name = get_source_info(meter)
+    new_sink_info = get_sink_info(meter,new_sink_name,publisher)
+
+    meter_list = get_meter_list_from_source(source_name,pipeline_cfg)
+    sink_name = get_sink_name_from_source_name(source_name,pipeline_cfg)
+    publisher_list = get_publisher_list_from_sink(sink_name,pipeline_cfg)
+    for i in publisher_list:
+        for j in i:
+            #print j
+            if j not in new_sink_info.get("publishers") :
+                new_sink_info.get("publishers").append(j)
+                #print new_sink_info
+
+    cfg_source = pipeline_cfg['sources']
+    cfg_sink = pipeline_cfg['sinks']
+    if meter_match == "superset" :
+        new_source_info.get("meters").append("!"+ matching_meter)
+    elif meter_match == "subset" :
+        ''' here we need to get the list of meters with a sub-string match '''
+        add_meter_to_source("!"+meter,source_name,pipeline_cfg)
+        add_publisher_to_sink(publisher,sink_name,pipeline_cfg)
+
+    logging.debug("-----------  Before Updating Meter Info ------------------")
+    logging.debug("%s",pipeline_cfg)
+
+    ''' Updating source and sink info '''
+    cfg_source.append(new_source_info)
+    cfg_sink.append(new_sink_info)
+    logging.debug("-----------  After Updating Meter Info --------------------")
+    logging.debug("%s",pipeline_cfg)
+
+def update_conf_to_pipe_line_cfg(meter,publisher,pipeline_cfg):
+    #import pdb;pdb.set_trace()
+    sink_name = get_sink_name_from_publisher(publisher,pipeline_cfg)
+    source_name = get_source_name_from_meter(meter,pipeline_cfg)
+    if sink_name is None :
+        logging.debug("No Sink exists with the given Publisher")
+        if source_name is None:
+            ''' The wild card matching code below is commented out '''
+            '''
+            pattern_source_name,pattern,matching_meter = get_source_name_from_with_meter_patter_match(meter,pipeline_cfg)
+            if pattern_source_name is not None:
+                if pattern == "superset" :
+                    #add_meter_to_source("!"+meter,pattern_source_name,pipeline_cfg)
+                    update_sink_aggrgation(meter,publisher,pattern_source_name,matching_meter,"superset",pipeline_cfg)
+                    #print pipeline_cfg
+                    return True 
+                if pattern == "subset" :
+                   update_sink_aggrgation(meter,publisher,pattern_source_name,matching_meter,"subset",pipeline_cfg)
+                   return True    
+            ''' 
+            source_info,sink_name = get_source_info(meter)
+            sink_info = get_sink_info(meter,sink_name,publisher)
+  
+            cfg_source = pipeline_cfg['sources']
+            cfg_sink = pipeline_cfg['sinks']
+
+            logging.debug("-----------  Before Updating Meter Info ------------------")
+            logging.debug("%s",pipeline_cfg)
+
+            ''' Updating source and sink info '''
+            cfg_source.append(source_info)
+            cfg_sink.append(sink_info)
+            logging.debug("-----------  After Updating Meter Info --------------------")
+            logging.debug("%s",pipeline_cfg)
+            return True
+        else :
+             logging.debug("Meter already exists in the conf file under source name:%s ",source_name)
+             meter_list = get_meter_list_from_source(source_name,pipeline_cfg)
+             publisher_list=[]
+             if len(meter_list) > 1:
+                sink_name = get_sink_name_from_source_name(source_name,pipeline_cfg)
+                '''
+                if type(sink_name) is list :
+                    for sinkname in sink_name:    
+                        publisher_list.append(get_publisher_list_from_sink(sinkname,pipeline_cfg))
+                else :
+                     publisher_list.append(get_publisher_list_from_sink(sink_name,pipeline_cfg))
+                ''' 
+                publisher_list = get_publisher_list_from_sink(sink_name,pipeline_cfg)
+                new_source_info,new_sink_name = get_source_info(meter)
+                new_sink_info = get_sink_info(meter,new_sink_name,publisher)
+                for i in publisher_list:
+                     for j in i:
+                          #print j
+                          if j not in new_sink_info.get("publishers") :
+                              new_sink_info.get("publishers").append(j)
+                cfg_source = pipeline_cfg['sources']
+                cfg_sink = pipeline_cfg['sinks']
+
+                logging.debug("-----------  Before Updating Meter Info ------------------")
+                logging.debug("%s",pipeline_cfg)
+
+                ''' Updating source and sink info '''
+                cfg_source.append(new_source_info)
+                cfg_sink.append(new_sink_info)
+                logging.debug("-----------  After Updating Meter Info --------------------")
+                logging.debug("%s",pipeline_cfg)
+                delete_meter_from_source(meter,source_name,pipeline_cfg)
+                logging.debug("%s",pipeline_cfg)
+                return True
+             else :
+                  logging.debug ("Source already exists for this meter add publisher to it .....:%s",source_name)
+                  sink_name_list = get_sink_name_from_source_name(source_name,pipeline_cfg)
+                  for sink_name in sink_name_list :
+                      add_publisher_to_sink(publisher,sink_name,pipeline_cfg)
+                  return True    
+                  #print pipeline_cfg
+    else :
+         logging.debug ("Publisher already exists under sink:%s",sink_name)
+         if get_source_name_from_meter(meter,pipeline_cfg) is not None:
+             logging.debug("Both meter  and publisher already exists in the conf file")
+             logging.debug( "Update request is not sucessful")
+             return False
+         else :
+             source_name = get_source_name_from_sink_name(sink_name,pipeline_cfg) 
+             logging.debug ("Need to add meter to already existing source which \
+                    has this publisher under one of its sink")
+             #print source_name
+             if add_meter_to_source(meter,source_name,pipeline_cfg):
+                 logging.debug("Meter added sucessfully")
+                 return True   
+               
+            
+        
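+# Example usage (the meter and publisher below are illustrative): to subscribe the
+# 'cpu_util' meter to a UDP publisher and later remove it, a caller would do roughly:
+#
+#   pipeline_cfg = main()   # load /etc/ceilometer/pipeline.yaml
+#   update_conf_to_pipe_line_cfg('cpu_util', 'udp://10.0.0.1:4455', pipeline_cfg)
+#   delete_conf_from_pipe_line_cfg('cpu_util', 'udp://10.0.0.1:4455', pipeline_cfg)
+#
+# Both calls modify pipeline_cfg in place and return True on success; writing the
+# updated config back to disk is handled by pipeline_agent.py.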
diff --git a/xos/synchronizer/ceilometer/udp_proxy/README b/xos/synchronizer/ceilometer/udp_proxy/README
new file mode 100644
index 0000000..65d3e47
--- /dev/null
+++ b/xos/synchronizer/ceilometer/udp_proxy/README
@@ -0,0 +1,14 @@
+
+# Please install the packages below before running udpagent:
+
+sudo apt-get install ubuntu-cloud-keyring
+echo "deb http://ubuntu-cloud.archive.canonical.com/ubuntu" "trusty-updates/kilo main" > /etc/apt/sources.list.d/cloudarchive-kilo.list
+sudo apt-get update
+sudo pip install msgpack-python
+sudo pip install kombu
+sudo pip install oslo.utils
+sudo apt-get install gcc
+sudo apt-get install python-dev
+sudo pip install Babel
+sudo pip install wrapt
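+
+# To start the agent (it reads udpagent.conf from the current working directory,
+# so adjust the UDP and RabbitMQ settings there first):
+python udpagent.py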
diff --git a/xos/synchronizer/ceilometer/udp_proxy/udpagent.conf b/xos/synchronizer/ceilometer/udp_proxy/udpagent.conf
new file mode 100644
index 0000000..b1aa65a
--- /dev/null
+++ b/xos/synchronizer/ceilometer/udp_proxy/udpagent.conf
@@ -0,0 +1,30 @@
+[udpservice]
+udp_address = 130.127.133.123
+udp_port = 4455
+rabbit_userid = openstack
+rabbit_password = password
+rabbit_hosts = 130.127.133.175
+acord_control_exchange = openstack_infra
+
+[loggers]
+keys=root
+
+[handlers]
+keys=logfile
+
+[formatters]
+keys=logfileformatter
+
+[logger_root]
+level=INFO
+#level=DEBUG
+handlers=logfile
+
+[formatter_logfileformatter]
+format=%(asctime)s %(filename)s %(levelname)s %(message)s
+
+[handler_logfile]
+class=handlers.RotatingFileHandler
+level=NOTSET
+args=('udpagent.log','a',1000000,100)
+formatter=logfileformatter
diff --git a/xos/synchronizer/ceilometer/udp_proxy/udpagent.py b/xos/synchronizer/ceilometer/udp_proxy/udpagent.py
new file mode 100644
index 0000000..81826ad
--- /dev/null
+++ b/xos/synchronizer/ceilometer/udp_proxy/udpagent.py
@@ -0,0 +1,95 @@
+import logging
+import logging.handlers
+import logging.config
+import ConfigParser
+import socket
+import msgpack
+from kombu.connection import BrokerConnection
+from kombu.messaging import Exchange, Queue, Consumer, Producer
+import six
+import uuid
+import datetime
+from oslo_utils import netutils
+from oslo_utils import timeutils
+from oslo_utils import units
+
+
+
+#logging.config.fileConfig('udpagent.conf', disable_existing_loggers=False)
+class UdpService():
+    def __init__(self):
+        config = ConfigParser.ConfigParser()
+        config.read('udpagent.conf')
+        self.udp_address      =  config.get('udpservice','udp_address')
+        self.udp_port         =  int(config.get('udpservice','udp_port')) 
+        self.rabbit_user      =  config.get('udpservice','rabbit_userid')
+        self.rabbit_password  =  config.get('udpservice','rabbit_password') 
+        self.rabbit_host      =  config.get('udpservice','rabbit_hosts')
+        self.acord_control_exchange = config.get('udpservice','acord_control_exchange') 
+        logging.config.fileConfig('udpagent.conf', disable_existing_loggers=False)
+    def printconfig(self):
+        logging.debug("udp_address:%s",self.udp_address) 
+        logging.debug("udp_port:%s",self.udp_port)  
+        logging.debug("rabbit_user:%s",self.rabbit_user)  
+        logging.debug("rabbit_password:%s",self.rabbit_password)  
+        logging.debug("rabbit_hosts:%s",self.rabbit_host)  
+        logging.debug("cord_control_exchange:%s",self.acord_control_exchange)
+   
+    def convert_sample_to_event_data(self,msg):
+        event_data = {'event_type': 'infra','message_id':six.text_type(uuid.uuid4()),'publisher_id': 'cpe_publisher_id','timestamp':datetime.datetime.now().isoformat(),'priority':'INFO','payload':msg}
+        return event_data
+   
+    def setup_rabbit_mq_channel(self):
+        service_exchange = Exchange(self.acord_control_exchange, "topic", durable=False)
+        # connections/channels
+        connection = BrokerConnection(self.rabbit_host, self.rabbit_user, self.rabbit_password)
+        logging.info("Connection to RabbitMQ server successful")
+        channel = connection.channel()
+        # produce
+        self.producer = Producer(channel, exchange=service_exchange, routing_key='notifications.info')
+
+    def start_udp(self):
+        address_family = socket.AF_INET
+        if netutils.is_valid_ipv6(self.udp_address):
+            address_family = socket.AF_INET6
+        udp = socket.socket(address_family, socket.SOCK_DGRAM)
+        udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        udp.bind((self.udp_address,
+                  self.udp_port))
+
+        self.setup_rabbit_mq_channel()
+        self.udp_run = True
+        while self.udp_run:
+            # NOTE(jd) Arbitrary limit of 64K because that ought to be
+            # enough for anybody.
+            data, source = udp.recvfrom(64 * units.Ki)
+            try:
+                sample = msgpack.loads(data, encoding='utf-8')
+            except Exception:
+                logging.warning("UDP: Cannot decode data sent by %s", source)
+            else:
+                try:
+                    if "event_type" in sample:
+                         logging.debug("received event :%s",sample)
+                         self.producer.publish(sample)
+                    else:
+                         logging.debug("received sample :%s",sample)
+                         msg = self.convert_sample_to_event_data(sample)
+                         self.producer.publish(msg)
+                except Exception:
+                    logging.exception("UDP: Unable to publish msg")
+       
+
+def main():
+    try:
+        udpservice=UdpService()
+        udpservice.printconfig()
+        udpservice.start_udp()  
+
+    except Exception as e:
+        logging.exception("* Error in starting udpagent:%s",e.__str__())
+    
+
+
+if __name__ == "__main__":
+    main()