summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSiddharth Wagle <swagle@hortonworks.com>2017-08-07 15:46:11 -0700
committerSiddharth Wagle <swagle@hortonworks.com>2017-08-07 15:46:11 -0700
commit0aab38034f66153adacf2d3627d36caf18cfc1a0 (patch)
treebc5e752611be6b5bba726e75710aa527f0d14d18
parent2533644003a58fa3a6909d10ed184eb9d126fe1c (diff)
AMBARI-21664. HDFS namenode rpc and connection load metrics are not showing. (swagle)
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/alerts.json158
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-env.xml114
-rw-r--r--ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-grafana-env.xml93
-rw-r--r--ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-grafana-ini.xml320
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-env.xml245
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-log4j.xml147
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-policy.xml56
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-security-site.xml167
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-site.xml489
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-log4j.xml66
-rw-r--r--ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-logsearch-conf.xml201
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-site.xml723
-rw-r--r--ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-ssl-client.xml51
-rw-r--r--ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-ssl-server.xml80
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/kerberos.json122
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/metainfo.xml156
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/metrics.json2472
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/alerts/alert_ambari_metrics_monitor.py104
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/hbaseSmokeVerify.sh34
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/AMBARI_METRICS.txt245
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/FLUME.txt17
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/HBASE.txt588
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/HDFS.txt277
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/HOST.txt37
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/KAFKA.txt190
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/STORM.txt7
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/YARN.txt178
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/__init__.py19
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/ams.py388
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/ams_service.py103
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/functions.py51
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/hbase.py267
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/hbase_master.py70
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/hbase_regionserver.py66
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/hbase_service.py53
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/metrics_collector.py133
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/metrics_monitor.py59
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/params.py257
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/params_linux.py50
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/params_windows.py53
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/service_check.py166
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/service_mapping.py22
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/split_points.py236
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/status.py46
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/status_params.py39
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/ams.conf.j235
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/ams_collector_jaas.conf.j226
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/ams_zookeeper_jaas.conf.j226
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hadoop-metrics2-hbase.properties.j263
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hbase_client_jaas.conf.j223
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hbase_grant_permissions.j239
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hbase_master_jaas.conf.j226
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hbase_regionserver_jaas.conf.j226
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/metric_groups.conf.j237
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/metric_monitor.ini.j231
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/regionservers.j220
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/smoketest_metrics.json.j215
-rwxr-xr-xambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/widgets.json209
-rw-r--r--ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py10
59 files changed, 145 insertions, 9856 deletions
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/alerts.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/alerts.json
deleted file mode 100755
index 90401e68c1..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/alerts.json
+++ /dev/null
@@ -1,158 +0,0 @@
-{
- "AMBARI_METRICS": {
- "service": [
- {
- "name": "metrics_monitor_process_percent",
- "label": "Percent Metrics Monitors Available",
- "description": "This alert is triggered if a percentage of Metrics Monitor processes are not up and listening on the network for the configured warning and critical thresholds.",
- "interval": 1,
- "scope": "SERVICE",
- "enabled": true,
- "source": {
- "type": "AGGREGATE",
- "alert_name": "ams_metrics_monitor_process",
- "reporting": {
- "ok": {
- "text": "affected: [{1}], total: [{0}]"
- },
- "warning": {
- "text": "affected: [{1}], total: [{0}]",
- "value": 0.1
- },
- "critical": {
- "text": "affected: [{1}], total: [{0}]",
- "value": 0.3
- }
- }
- }
- }
- ],
- "METRICS_COLLECTOR": [
- {
- "name": "ams_metrics_collector_autostart",
- "label": "Metrics Collector - Auto-Restart Status",
- "description": "This alert is triggered if the Metrics Collector has been restarted automatically too frequently in last one hour. By default, a Warning alert is triggered if restarted twice in one hour and a Critical alert is triggered if restarted 4 or more times in one hour.",
- "interval": 1,
- "scope": "ANY",
- "enabled": true,
- "source": {
- "type": "RECOVERY",
- "reporting": {
- "ok": {
- "text": "Metrics Collector has not been auto-started and is running normally{0}."
- },
- "warning": {
- "text": "Metrics Collector has been auto-started {1} times{0}.",
- "count": 2
- },
- "critical": {
- "text": "Metrics Collector has been auto-started {1} times{0}.",
- "count": 4
- }
- }
- }
- },
- {
- "name": "ams_metrics_collector_process",
- "label": "Metrics Collector Process",
- "description": "This alert is triggered if the Metrics Collector cannot be confirmed to be up and listening on the configured port for number of seconds equal to threshold.",
- "interval": 1,
- "scope": "ANY",
- "enabled": true,
- "source": {
- "type": "PORT",
- "uri": "{{ams-site/timeline.metrics.service.webapp.address}}",
- "default_port": 6188,
- "reporting": {
- "ok": {
- "text": "TCP OK - {0:.3f}s response on port {1}"
- },
- "warning": {
- "text": "TCP OK - {0:.3f}s response on port {1}",
- "value": 1.5
- },
- "critical": {
- "text": "Connection failed: {0} to {1}:{2}",
- "value": 5.0
- }
- }
- }
- },
- {
- "name": "ams_metrics_collector_hbase_master_process",
- "label": "Metrics Collector - HBase Master Process",
- "description": "This alert is triggered if the Metrics Collector's HBase master processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
- "interval": 1,
- "scope": "ANY",
- "source": {
- "type": "PORT",
- "uri": "{{ams-hbase-site/hbase.master.info.port}}",
- "default_port": 61310,
- "reporting": {
- "ok": {
- "text": "TCP OK - {0:.3f}s response on port {1}"
- },
- "warning": {
- "text": "TCP OK - {0:.3f}s response on port {1}",
- "value": 1.5
- },
- "critical": {
- "text": "Connection failed: {0} to {1}:{2}",
- "value": 5.0
- }
- }
- }
- },
- {
- "name": "ams_metrics_collector_hbase_master_cpu",
- "label": "Metrics Collector - HBase Master CPU Utilization",
- "description": "This host-level alert is triggered if CPU utilization of the Metrics Collector's HBase Master exceeds certain warning and critical thresholds. It checks the HBase Master JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
- "interval": 5,
- "scope": "ANY",
- "enabled": true,
- "source": {
- "type": "METRIC",
- "uri": {
- "http": "{{ams-hbase-site/hbase.master.info.port}}",
- "default_port": 61310,
- "connection_timeout": 5.0
- },
- "reporting": {
- "ok": {
- "text": "{1} CPU, load {0:.1%}"
- },
- "warning": {
- "text": "{1} CPU, load {0:.1%}",
- "value": 200
- },
- "critical": {
- "text": "{1} CPU, load {0:.1%}",
- "value": 250
- },
- "units" : "%"
- },
- "jmx": {
- "property_list": [
- "java.lang:type=OperatingSystem/SystemCpuLoad",
- "java.lang:type=OperatingSystem/AvailableProcessors"
- ],
- "value": "{0} * 100"
- }
- }
- }
- ],
- "METRICS_MONITOR": [
- {
- "name": "ams_metrics_monitor_process",
- "label": "Metrics Monitor Status",
- "description": "This alert indicates the status of the Metrics Monitor process as determined by the monitor status script.",
- "interval": 1,
- "scope": "ANY",
- "source": {
- "type": "SCRIPT",
- "path": "BigInsights/4.2/services/AMBARI_METRICS/package/alerts/alert_ambari_metrics_monitor.py"
- }
- }
- ]
- }
-}
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-env.xml
deleted file mode 100755
index 45f9880032..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-env.xml
+++ /dev/null
@@ -1,114 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
- ~ Licensed to the Apache Software Foundation (ASF) under one
- ~ or more contributor license agreements. See the NOTICE file
- ~ distributed with this work for additional information
- ~ regarding copyright ownership. The ASF licenses this file
- ~ to you under the Apache License, Version 2.0 (the
- ~ "License"); you may not use this file except in compliance
- ~ with the License. You may obtain a copy of the License at
- ~
- ~ http://www.apache.org/licenses/LICENSE-2.0
- ~
- ~ Unless required by applicable law or agreed to in writing, software
- ~ distributed under the License is distributed on an "AS IS" BASIS,
- ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ~ See the License for the specific language governing permissions and
- ~ limitations under the License.
- -->
-
-<configuration supports_do_not_extend="true">
- <property>
- <name>ambari_metrics_user</name>
- <display-name>Ambari Metrics User</display-name>
- <value>ams</value>
- <on-ambari-upgrade add="true"/>
- <property-type>USER</property-type>
- <description>Ambari Metrics User Name.</description>
- </property>
- <property>
- <name>metrics_collector_log_dir</name>
- <value>/var/log/ambari-metrics-collector</value>
- <display-name>Metrics Collector log dir</display-name>
- <description>Collector log directory.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>metrics_collector_pid_dir</name>
- <value>/var/run/ambari-metrics-collector</value>
- <display-name>Metrics Collector pid dir</display-name>
- <description>Collector pid directory.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>metrics_monitor_pid_dir</name>
- <value>/var/run/ambari-metrics-monitor</value>
- <display-name>Metrics Monitor pid dir</display-name>
- <description>Monitor pid directory.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>metrics_monitor_log_dir</name>
- <value>/var/log/ambari-metrics-monitor</value>
- <display-name>Metrics Monitor log dir</display-name>
- <description>Monitor log directory.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>metrics_collector_heapsize</name>
- <value>512</value>
- <description>Metrics Collector Heap Size</description>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <type>int</type>
- <unit>MB</unit>
- </value-attributes>
- </property>
- <property>
- <name>content</name>
- <value>
-# Set environment variables here.
-
-# The java implementation to use. Java 1.6 required.
-export JAVA_HOME={{java64_home}}
-
-# Collector Log directory for log4j
-export AMS_COLLECTOR_LOG_DIR={{ams_collector_log_dir}}
-
-# Monitor Log directory for outfile
-export AMS_MONITOR_LOG_DIR={{ams_monitor_log_dir}}
-
-# Collector pid directory
-export AMS_COLLECTOR_PID_DIR={{ams_collector_pid_dir}}
-
-# Monitor pid directory
-export AMS_MONITOR_PID_DIR={{ams_monitor_pid_dir}}
-
-# AMS HBase pid directory
-export AMS_HBASE_PID_DIR={{hbase_pid_dir}}
-
-# AMS Collector heapsize
-export AMS_COLLECTOR_HEAPSIZE={{metrics_collector_heapsize}}
-
-# HBase normalizer enabled
-export AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}
-
-# HBase compaction policy enabled
-export AMS_HBASE_FIFO_COMPACTION_ENABLED={{ams_hbase_fifo_compaction_enabled}}
-
-# AMS Collector options
-export AMS_COLLECTOR_OPTS="-Djava.library.path=/usr/lib/ams-hbase/lib/hadoop-native"
-{% if security_enabled %}
-export AMS_COLLECTOR_OPTS="$AMS_COLLECTOR_OPTS -Djava.security.auth.login.config={{ams_collector_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}"
-{% endif %}
-
-# AMS Collector GC options
-export AMS_COLLECTOR_GC_OPTS="-XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{ams_collector_log_dir}}/collector-gc.log-`date +'%Y%m%d%H%M'`"
-export AMS_COLLECTOR_OPTS="$AMS_COLLECTOR_OPTS $AMS_COLLECTOR_GC_OPTS"
-
- </value>
- <on-ambari-upgrade add="true"/>
- </property>
-
-</configuration>
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-grafana-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-grafana-env.xml
deleted file mode 100644
index eaafc6b429..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-grafana-env.xml
+++ /dev/null
@@ -1,93 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
- ~ Licensed to the Apache Software Foundation (ASF) under one
- ~ or more contributor license agreements. See the NOTICE file
- ~ distributed with this work for additional information
- ~ regarding copyright ownership. The ASF licenses this file
- ~ to you under the Apache License, Version 2.0 (the
- ~ "License"); you may not use this file except in compliance
- ~ with the License. You may obtain a copy of the License at
- ~
- ~ http://www.apache.org/licenses/LICENSE-2.0
- ~
- ~ Unless required by applicable law or agreed to in writing, software
- ~ distributed under the License is distributed on an "AS IS" BASIS,
- ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ~ See the License for the specific language governing permissions and
- ~ limitations under the License.
- -->
-<configuration>
- <property>
- <name>metrics_grafana_log_dir</name>
- <value>/var/log/ambari-metrics-grafana</value>
- <display-name>Metrics Grafana log dir</display-name>
- <description>Metrics Grafana log directory.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>metrics_grafana_pid_dir</name>
- <value>/var/run/ambari-metrics-grafana</value>
- <display-name>Metrics Grafana pid dir</display-name>
- <description>Metrics Grafana pid directory.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>metrics_grafana_data_dir</name>
- <value>/var/lib/ambari-metrics-grafana</value>
- <display-name>Metrics Grafana data dir</display-name>
- <description>Metrics Grafana data directory.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>metrics_grafana_username</name>
- <value>admin</value>
- <display-name>Grafana Admin Username</display-name>
- <description>
- Metrics Grafana Username. This value cannot be modified by Ambari
- except on initial install. Please make sure the username change in
- Grafana is reflected in Ambari.
- </description>
- <value-attributes>
- <type>db_user</type>
- <overridable>false</overridable>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property require-input="true">
- <name>metrics_grafana_password</name>
- <value/>
- <property-type>PASSWORD</property-type>
- <display-name>Grafana Admin Password</display-name>
- <description>
- Metrics Grafana password. This value cannot be modified by Ambari
- except on initial install. Please make sure the password change in
- Grafana is reflected back in Ambari.
- </description>
- <value-attributes>
- <overridable>false</overridable>
- <type>password</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>content</name>
- <display-name>ams-grafana-env template</display-name>
- <value>
-# Set environment variables here.
-
-# AMS UI Server Home Dir
-export AMS_GRAFANA_HOME_DIR={{ams_grafana_home_dir}}
-
-# AMS UI Server Data Dir
-export AMS_GRAFANA_DATA_DIR={{ams_grafana_data_dir}}
-
-# AMS UI Server Log Dir
-export AMS_GRAFANA_LOG_DIR={{ams_grafana_log_dir}}
-
-# AMS UI Server PID Dir
-export AMS_GRAFANA_PID_DIR={{ams_grafana_pid_dir}}
- </value>
- <on-ambari-upgrade add="true"/>
- </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-grafana-ini.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-grafana-ini.xml
deleted file mode 100644
index 3c87ab13bf..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-grafana-ini.xml
+++ /dev/null
@@ -1,320 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
- ~ Licensed to the Apache Software Foundation (ASF) under one
- ~ or more contributor license agreements. See the NOTICE file
- ~ distributed with this work for additional information
- ~ regarding copyright ownership. The ASF licenses this file
- ~ to you under the Apache License, Version 2.0 (the
- ~ "License"); you may not use this file except in compliance
- ~ with the License. You may obtain a copy of the License at
- ~
- ~ http://www.apache.org/licenses/LICENSE-2.0
- ~
- ~ Unless required by applicable law or agreed to in writing, software
- ~ distributed under the License is distributed on an "AS IS" BASIS,
- ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ~ See the License for the specific language governing permissions and
- ~ limitations under the License.
- -->
-<configuration>
- <property>
- <name>port</name>
- <value>3000</value>
- <description>The http port to use</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>protocol</name>
- <value>http</value>
- <description>Protocol (http or https)</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>cert_file</name>
- <value>/etc/ambari-metrics-grafana/conf/ams-grafana.crt</value>
- <description>Path to grafana certificate (.crt) file.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>cert_key</name>
- <value>/etc/ambari-metrics-grafana/conf/ams-grafana.key</value>
- <description>Path to grafana certificate key (.key) file.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>ca_cert</name>
- <value></value>
- <description>Path to CA root certificate or bundle to be used to validate the Grafana certificate against.
- For self signed certificates, this value can be the same as the value for 'cert_file'.
- (If a path is not specified, the certificate validation is skipped)</description>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>content</name>
- <display-name>ams-grafana-ini template</display-name>
- <value>
-##################### Grafana Configuration Example #####################
-#
-# Everything has defaults so you only need to uncomment things you want to
-# change
-
-# possible values : production, development
-; app_mode = production
-
-#################################### Paths ####################################
-[paths]
-# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
-#
-;data = /var/lib/grafana
-data = {{ams_grafana_data_dir}}
-#
-# Directory where grafana can store logs
-#
-;logs = /var/log/grafana
-logs = {{ams_grafana_log_dir}}
-
-
-#################################### Server ####################################
-[server]
-# Protocol (http or https)
-;protocol = http
-protocol = {{ams_grafana_protocol}}
-# The ip address to bind to, empty will bind to all interfaces
-;http_addr =
-
-# The http port to use
-;http_port = 3000
-http_port = {{ams_grafana_port}}
-
-# The public facing domain name used to access grafana from a browser
-;domain = localhost
-
-# Redirect to correct domain if host header does not match domain
-# Prevents DNS rebinding attacks
-;enforce_domain = false
-
-# The full public facing url
-;root_url = %(protocol)s://%(domain)s:%(http_port)s/
-
-# Log web requests
-;router_logging = false
-
-# the path relative working path
-;static_root_path = public
-static_root_path = /usr/lib/ambari-metrics-grafana/public
-
-# enable gzip
-;enable_gzip = false
-
-# https certs &amp; key file
-;cert_file =
-;cert_key =
-cert_file = {{ams_grafana_cert_file}}
-cert_key = {{ams_grafana_cert_key}}
-
-#################################### Database ####################################
-[database]
-# Either "mysql", "postgres" or "sqlite3", it's your choice
-;type = sqlite3
-;host = 127.0.0.1:3306
-;name = grafana
-;user = root
-;password =
-
-# For "postgres" only, either "disable", "require" or "verify-full"
-;ssl_mode = disable
-
-# For "sqlite3" only, path relative to data_path setting
-;path = grafana.db
-
-#################################### Session ####################################
-[session]
-# Either "memory", "file", "redis", "mysql", "postgres", default is "file"
-;provider = file
-
-# Provider config options
-# memory: not have any config yet
-# file: session dir path, is relative to grafana data_path
-# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`
-# mysql: go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name`
-# postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
-;provider_config = sessions
-
-# Session cookie name
-;cookie_name = grafana_sess
-
-# If you use session in https only, default is false
-;cookie_secure = false
-
-# Session life time, default is 86400
-;session_life_time = 86400
-
-#################################### Analytics ####################################
-[analytics]
-# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
-# No ip addresses are being tracked, only simple counters to track
-# running instances, dashboard and error counts. It is very helpful to us.
-# Change this option to false to disable reporting.
-;reporting_enabled = true
-
-# Google Analytics universal tracking code, only enabled if you specify an id here
-;google_analytics_ua_id =
-
-#################################### Security ####################################
-[security]
-# default admin user, created on startup
-admin_user = {{ams_grafana_admin_user}}
-
-# default admin password, can be changed before first start of grafana, or in profile settings
-;admin_password =
-
-# used for signing
-;secret_key = SW2YcwTIb9zpOOhoPsMm
-
-# Auto-login remember days
-;login_remember_days = 7
-;cookie_username = grafana_user
-;cookie_remember_name = grafana_remember
-
-# disable gravatar profile images
-;disable_gravatar = false
-
-# data source proxy whitelist (ip_or_domain:port seperated by spaces)
-;data_source_proxy_whitelist =
-
-#################################### Users ####################################
-[users]
-# disable user signup / registration
-;allow_sign_up = true
-
-# Allow non admin users to create organizations
-;allow_org_create = true
-
-# Set to true to automatically assign new users to the default organization (id 1)
-;auto_assign_org = true
-
-# Default role new users will be automatically assigned (if disabled above is set to true)
-;auto_assign_org_role = Viewer
-
-# Background text for the user field on the login page
-;login_hint = email or username
-
-#################################### Anonymous Auth ##########################
-[auth.anonymous]
-# enable anonymous access
-enabled = true
-
-# specify organization name that should be used for unauthenticated users
-org_name = Main Org.
-
-# specify role for unauthenticated users
-;org_role = Admin
-
-#################################### Github Auth ##########################
-[auth.github]
-;enabled = false
-;allow_sign_up = false
-;client_id = some_id
-;client_secret = some_secret
-;scopes = user:email,read:org
-;auth_url = https://github.com/login/oauth/authorize
-;token_url = https://github.com/login/oauth/access_token
-;api_url = https://api.github.com/user
-;team_ids =
-;allowed_organizations =
-
-#################################### Google Auth ##########################
-[auth.google]
-;enabled = false
-;allow_sign_up = false
-;client_id = some_client_id
-;client_secret = some_client_secret
-;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
-;auth_url = https://accounts.google.com/o/oauth2/auth
-;token_url = https://accounts.google.com/o/oauth2/token
-;api_url = https://www.googleapis.com/oauth2/v1/userinfo
-;allowed_domains =
-
-#################################### Auth Proxy ##########################
-[auth.proxy]
-;enabled = false
-;header_name = X-WEBAUTH-USER
-;header_property = username
-;auto_sign_up = true
-
-#################################### Basic Auth ##########################
-[auth.basic]
-;enabled = true
-
-#################################### Auth LDAP ##########################
-[auth.ldap]
-;enabled = false
-;config_file = /etc/grafana/ldap.toml
-
-#################################### SMTP / Emailing ##########################
-[smtp]
-;enabled = false
-;host = localhost:25
-;user =
-;password =
-;cert_file =
-;key_file =
-;skip_verify = false
-;from_address = admin@grafana.localhost
-
-[emails]
-;welcome_email_on_sign_up = false
-
-#################################### Logging ##########################
-[log]
-# Either "console", "file", default is "console"
-# Use comma to separate multiple modes, e.g. "console, file"
-;mode = console, file
-
-# Buffer length of channel, keep it as it is if you don't know what it is.
-;buffer_len = 10000
-
-# Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Trace"
-;level = Info
-
-# For "console" mode only
-[log.console]
-;level =
-
-# For "file" mode only
-[log.file]
-;level =
-# This enables automated log rotate(switch of following options), default is true
-;log_rotate = true
-
-# Max line number of single file, default is 1000000
-;max_lines = 1000000
-
-# Max size shift of single file, default is 28 means 1 &lt;&lt; 28, 256MB
-;max_lines_shift = 28
-
-# Segment log daily, default is true
-;daily_rotate = true
-
-# Expired days of log file(delete after max days), default is 7
-;max_days = 7
-
-#################################### AMPQ Event Publisher ##########################
-[event_publisher]
-;enabled = false
-;rabbitmq_url = amqp://localhost/
-;exchange = grafana_events
-
-;#################################### Dashboard JSON files ##########################
-[dashboards.json]
-;enabled = false
-;path = /var/lib/grafana/dashboards
-path = /usr/lib/ambari-metrics-grafana/public/dashboards
- </value>
- <on-ambari-upgrade add="true"/>
- </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-env.xml
deleted file mode 100755
index 00d396ca9d..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-env.xml
+++ /dev/null
@@ -1,245 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_do_not_extend="true">
- <property>
- <name>hbase_log_dir</name>
- <value>/var/log/ambari-metrics-collector</value>
- <description>Log Directories for HBase.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase_pid_dir</name>
- <value>/var/run/ambari-metrics-collector/</value>
- <description>Pid Directory for HBase.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase_classpath_additional</name>
- <value></value>
- <description>Additional directory or jar in classpath for HBase.</description>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>hbase_regionserver_heapsize</name>
- <value>1024</value>
- <description>
- HBase RegionServer Heap Size. In embedded mode, total heap size is
- sum of master and regionserver heap sizes.
- </description>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <type>int</type>
- <unit>MB</unit>
- </value-attributes>
- <depends-on>
- <property>
- <type>ams-hbase-site</type>
- <name>hbase.cluster.distributed</name>
- </property>
- <property>
- <type>ams-hbase-site</type>
- <name>hbase.rootdir</name>
- </property>
- </depends-on>
- </property>
- <property>
- <name>regionserver_xmn_size</name>
- <value>256</value>
- <description>HBase RegionServer maximum value for young generation heap size.</description>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <type>int</type>
- <unit>MB</unit>
- </value-attributes>
- <depends-on>
- <property>
- <type>ams-hbase-site</type>
- <name>hbase.cluster.distributed</name>
- </property>
- </depends-on>
- </property>
- <property>
- <name>hbase_master_xmn_size</name>
- <value>256</value>
- <description>
- HBase Master maximum value for young generation heap size.
- </description>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <type>int</type>
- <unit>MB</unit>
- </value-attributes>
- <depends-on>
- <property>
- <type>ams-hbase-site</type>
- <name>hbase.cluster.distributed</name>
- </property>
- </depends-on>
- </property>
- <property>
- <name>hbase_master_maxperm_size</name>
- <value>128</value>
- <description>HBase RegionServer maximum value for perm heap size.</description>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <type>int</type>
- <unit>MB</unit>
- </value-attributes>
- </property>
- <property>
- <name>hbase_regionserver_xmn_ratio</name>
- <value>0.2</value>
- <description>Percentage of max heap size (-Xmx) which used for young generation heap (-Xmn).</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase_master_heapsize</name>
- <value>1024</value>
- <description>
- HBase Master Heap Size. In embedded mode, total heap size is
- sum of master and regionserver heap sizes.
- </description>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <type>int</type>
- <unit>MB</unit>
- </value-attributes>
- <depends-on>
- <property>
- <type>ams-hbase-site</type>
- <name>hbase.cluster.distributed</name>
- </property>
- <property>
- <type>ams-hbase-site</type>
- <name>hbase.rootdir</name>
- </property>
- </depends-on>
- </property>
- <property>
- <name>max_open_files_limit</name>
- <value>32768</value>
- <description>
- The maximum number of open file descriptors by process
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
-
- <!-- hbase-env.sh -->
- <property>
- <name>content</name>
- <description>This is the jinja template for hbase-env.sh file</description>
- <value>
-# Set environment variables here.
-
-# The java implementation to use. Java 1.6+ required.
-export JAVA_HOME={{java64_home}}
-
-# HBase Configuration directory
-export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}
-
-# Extra Java CLASSPATH elements. Optional.
-additional_cp={{hbase_classpath_additional}}
-if [ -n "$additional_cp" ];
-then
- export HBASE_CLASSPATH=${HBASE_CLASSPATH}:$additional_cp
-else
- export HBASE_CLASSPATH=${HBASE_CLASSPATH}
-fi
-
-# The maximum amount of heap to use for hbase shell.
-export HBASE_SHELL_OPTS="-Xmx256m"
-
-# Extra Java runtime options.
-# Below are what we set by default. May only work with SUN JVM.
-# For more on why as well as other possible settings,
-# see http://wiki.apache.org/hadoop/PerformanceTuning
-export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{hbase_log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{hbase_tmp_dir}}"
-export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{hbase_log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
-# Uncomment below to enable java garbage collection logging.
-# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
-
-# Uncomment and adjust to enable JMX exporting
-# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
-# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-#
-# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
-
-{% if java_version &lt; 8 %}
-export HBASE_MASTER_OPTS=" -XX:PermSize=64m -XX:MaxPermSize={{hbase_master_maxperm_size}} -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
-export HBASE_REGIONSERVER_OPTS="-XX:MaxPermSize=128m -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
-{% else %}
-export HBASE_MASTER_OPTS=" -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
-export HBASE_REGIONSERVER_OPTS=" -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
-{% endif %}
-
-
-# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
-# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
-
-# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
-export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
-
-# Extra ssh options. Empty by default.
-# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
-
-# Where log files are stored. $HBASE_HOME/logs by default.
-export HBASE_LOG_DIR={{hbase_log_dir}}
-
-# A string representing this instance of hbase. $USER by default.
-# export HBASE_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes. See 'man nice'.
-# export HBASE_NICENESS=10
-
-# The directory where pid files are stored. /tmp by default.
-export HBASE_PID_DIR={{hbase_pid_dir}}
-
-# Seconds to sleep between slave commands. Unset by default. This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HBASE_SLAVE_SLEEP=0.1
-
-# Tell HBase whether it should manage it's own instance of Zookeeper or not.
-export HBASE_MANAGES_ZK=false
-
-{% if security_enabled %}
-export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}"
-export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}"
-export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}"
-export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS -Djava.security.auth.login.config={{ams_zookeeper_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}"
-{% endif %}
-
-# use embedded native libs
-_HADOOP_NATIVE_LIB="/usr/lib/ams-hbase/lib/hadoop-native/"
-export HBASE_OPTS="$HBASE_OPTS -Djava.library.path=${_HADOOP_NATIVE_LIB}"
-
-# Unset HADOOP_HOME to avoid importing HADOOP installed cluster related configs
-export HADOOP_HOME={{ams_hbase_home_dir}}
- </value>
- <on-ambari-upgrade add="true"/>
- </property>
-
-</configuration>
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-log4j.xml
deleted file mode 100755
index 4eea14fd74..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-log4j.xml
+++ /dev/null
@@ -1,147 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false" supports_do_not_extend="true">
-
- <property>
- <name>content</name>
- <description>Custom log4j.properties</description>
- <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Define some default values that can be overridden by system properties
-hbase.root.logger=INFO,console
-hbase.security.logger=INFO,console
-hbase.log.dir=.
-hbase.log.file=hbase.log
-
-# Define the root logger to the system property "hbase.root.logger".
-log4j.rootLogger=${hbase.root.logger}
-
-# Logging Threshold
-log4j.threshold=ALL
-
-#
-# Daily Rolling File Appender
-#
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
-
-# Rolling File Appender properties
-hbase.log.maxfilesize=256MB
-hbase.log.maxbackupindex=20
-
-# Rolling File Appender
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}
-
-log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}
-log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
-
-#
-# Security audit appender
-#
-hbase.security.log.file=SecurityAuth.audit
-hbase.security.log.maxfilesize=256MB
-hbase.security.log.maxbackupindex=20
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
-log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}
-log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.category.SecurityLogger=${hbase.security.logger}
-log4j.additivity.SecurityLogger=false
-#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE
-
-#
-# Null Appender
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
-
-# Custom Logging levels
-
-log4j.logger.org.apache.zookeeper=INFO
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.hbase=INFO
-# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
-log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
-log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
-#log4j.logger.org.apache.hadoop.dfs=DEBUG
-# Set this class to log INFO only otherwise its OTT
-# Enable this to get detailed connection error/retry logging.
-# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
-
-
-# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
-#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG
-
-# Uncomment the below if you want to remove logging of client region caching'
-# and scan of .META. messages
-# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO
-# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO
-
- </value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <show-property-name>false</show-property-name>
- </value-attributes>
- </property>
-
-</configuration>
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-policy.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-policy.xml
deleted file mode 100755
index 13726a2f21..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-policy.xml
+++ /dev/null
@@ -1,56 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="true" supports_do_not_extend="true">
- <property>
- <name>security.client.protocol.acl</name>
- <value>*</value>
- <description>ACL for HRegionInterface protocol implementations (ie.
- clients talking to HRegionServers)
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- <on-ambari-upgrade add="true"/>
- </property>
-
- <property>
- <name>security.admin.protocol.acl</name>
- <value>*</value>
- <description>ACL for HMasterInterface protocol implementation (ie.
- clients talking to HMaster for admin operations).
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- <on-ambari-upgrade add="true"/>
- </property>
-
- <property>
- <name>security.masterregion.protocol.acl</name>
- <value>*</value>
- <description>ACL for HMasterRegionInterface protocol implementations
- (for HRegionServers communicating with HMaster)
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- <on-ambari-upgrade add="true"/>
- </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-security-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-security-site.xml
deleted file mode 100755
index 64799c1449..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-security-site.xml
+++ /dev/null
@@ -1,167 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_do_not_extend="true">
- <property>
- <name>ams.zookeeper.keytab</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>ams.zookeeper.principal</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>hadoop.security.authentication</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>hbase.coprocessor.master.classes</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>hbase.coprocessor.region.classes</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>hbase.master.kerberos.principal</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>hbase.master.keytab.file</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>hbase.myclient.keytab</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>hbase.myclient.principal</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>hbase.regionserver.kerberos.principal</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>hbase.regionserver.keytab.file</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>hbase.security.authentication</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>hbase.security.authorization</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>hbase.zookeeper.property.authProvider.1</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>hbase.zookeeper.property.jaasLoginRenew</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>hbase.zookeeper.property.kerberos.removeHostFromPrincipal</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>hbase.zookeeper.property.kerberos.removeRealmFromPrincipal</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>zookeeper.znode.parent</name>
- <value></value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-site.xml
deleted file mode 100755
index bf62b8e365..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-hbase-site.xml
+++ /dev/null
@@ -1,489 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
- <property>
- <name>hbase.rootdir</name>
- <value>file:///var/lib/ambari-metrics-collector/hbase</value>
- <description>
- Ambari Metrics service uses HBase as default storage backend. Set the rootdir for
- HBase to either local filesystem path if using Ambari Metrics in embedded mode or
- to a HDFS dir, example: hdfs://namenode.example.org:8020/amshbase.
- </description>
- <depends-on>
- <property>
- <type>core-site</type>
- <name>fs.defaultFS</name>
- </property>
- <property>
- <type>ams-site</type>
- <name>timeline.metrics.service.operation.mode</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.tmp.dir</name>
- <value>/var/lib/ambari-metrics-collector/hbase-tmp</value>
- <description>
- Temporary directory on the local filesystem.
- Change this setting to point to a location more permanent
- than '/tmp' (The '/tmp' directory is often cleared on
- machine restart).
- </description>
- <value-attributes>
- <type>directory</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.local.dir</name>
- <value>${hbase.tmp.dir}/local</value>
- <description>Directory on the local filesystem to be used as a local storage
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.cluster.distributed</name>
- <value>false</value>
- <description>
- The mode the cluster will be in. Possible values are false for
- standalone mode and true for distributed mode. If false, startup will run
- all HBase and ZooKeeper daemons together in the one JVM.
- </description>
- <depends-on>
- <property>
- <type>ams-site</type>
- <name>timeline.metrics.service.operation.mode</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.master.wait.on.regionservers.mintostart</name>
- <value>1</value>
- <description>
- Ensure that HBase Master waits for # many region server to start.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.zookeeper.quorum</name>
- <value>{{zookeeper_quorum_hosts}}</value>
- <description>Comma separated list of servers in the ZooKeeper Quorum.
- For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
- By default this is set to localhost for local and pseudo-distributed modes
- of operation. For a fully-distributed setup, this should be set to a full
- list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
- this is the list of servers which we will start/stop ZooKeeper on.
- </description>
- <final>true</final>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.master.info.bindAddress</name>
- <value>0.0.0.0</value>
- <description>The bind address for the HBase Master web UI</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.master.info.port</name>
- <value>61310</value>
- <description>The port for the HBase Master web UI.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.regionserver.info.port</name>
- <value>61330</value>
- <description>The port for the HBase RegionServer web UI.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.master.port</name>
- <value>61300</value>
- <description>The port for the HBase Master web UI.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.regionserver.port</name>
- <value>61320</value>
- <description>The port for the HBase RegionServer web UI.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.hregion.majorcompaction</name>
- <value>0</value>
- <description>
- The time (in milliseconds) between 'major' compactions of all
- HStoreFiles in a region.
- 0 to disable automated major compactions.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>phoenix.query.spoolThresholdBytes</name>
- <value>20971520</value>
- <description>
- Threshold size in bytes after which results from parallelly executed
- query results are spooled to disk. Default is 20 mb.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.zookeeper.property.dataDir</name>
- <value>${hbase.tmp.dir}/zookeeper</value>
- <description>
- Property from ZooKeeper's config zoo.cfg.
- The directory where the snapshot is stored.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.client.scanner.caching</name>
- <value>10000</value>
- <description>
- Number of rows that will be fetched when calling next on a scanner
- if it is not served from (local, client) memory.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.normalizer.enabled</name>
- <value>false</value>
- <description>If set to true, Master will try to keep region size
- within each table approximately the same.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.normalizer.period</name>
- <value>600000</value>
- <description>Period in ms at which the region normalizer runs in the Master.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.master.normalizer.class</name>
- <value>org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer</value>
- <description>
- Class used to execute the region normalization when the period occurs.
- See the class comment for more on how it works
- http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hfile.block.cache.size</name>
- <value>0.3</value>
- <description>
- Percentage of maximum heap (-Xmx setting) to allocate to block cache
- used by a StoreFile. Default of 0.4 means allocate 40%.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.regionserver.global.memstore.upperLimit</name>
- <value>0.5</value>
- <description>
- Maximum size of all memstores in a region server before new
- updates are blocked and flushes are forced. Defaults to 40% of heap
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.regionserver.global.memstore.lowerLimit</name>
- <value>0.4</value>
- <description>
- When memstores are being forced to flush to make room in
- memory, keep flushing until we hit this mark. Defaults to 35% of heap.
- This value equal to hbase.regionserver.global.memstore.upperLimit causes
- the minimum possible flushing to occur when updates are blocked due to
- memstore limiting.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>phoenix.groupby.maxCacheSize</name>
- <value>307200000</value>
- <description>
- Size in bytes of pages cached during GROUP BY spilling. Default is 100Mb.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.hregion.max.filesize</name>
- <value>4294967296</value>
- <description>
- Maximum HFile size. If the sum of the sizes of a region&#x2019;s HFiles has grown
- to exceed this value, the region is split in two. Default is 10Gb.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.hregion.memstore.block.multiplier</name>
- <value>4</value>
- <description>
- Block updates if memstore has hbase.hregion.memstore.block.multiplier
- times hbase.hregion.memstore.flush.size bytes. Useful for preventing runaway
- memstore during spikes in update traffic.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.hstore.flusher.count</name>
- <value>2</value>
- <description>
- The number of flush threads. With fewer threads, the MemStore flushes
- will be queued. With more threads, the flushes will be executed in parallel,
- increasing the load on HDFS, and potentially causing more compactions.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>phoenix.query.timeoutMs</name>
- <value>300000</value>
- <description>
- Number of milliseconds after which a query will timeout on the client.
- Default is 5 min.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>phoenix.query.keepAliveMs</name>
- <value>300000</value>
- <description>
- Number of milliseconds after which a query will keep the connection to HBase alive.
- Default is 5 min.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.client.scanner.timeout.period</name>
- <value>300000</value>
- <description>
- Client scanner lease period in milliseconds.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.rpc.timeout</name>
- <value>300000</value>
- <description>
- This is for the RPC layer to define how long HBase client applications
- take for a remote call to time out. It uses pings to check connections
- but will eventually throw a TimeoutException.
- </description>
- <display-name>HBase RPC Timeout</display-name>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.regionserver.thread.compaction.large</name>
- <value>2</value>
- <description>
- Configuration key for the large compaction threads.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.regionserver.thread.compaction.small</name>
- <value>3</value>
- <description>
- Configuration key for the small compaction threads.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.zookeeper.property.clientPort</name>
- <value>{{zookeeper_clientPort}}</value>
- <depends-on>
- <property>
- <type>zoo.cfg</type>
- <name>clientPort</name>
- </property>
- <property>
- <type>ams-hbase-site</type>
- <name>hbase.cluster.distributed</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.zookeeper.peerport</name>
- <value>61288</value>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.zookeeper.leaderport</name>
- <value>61388</value>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.hstore.blockingStoreFiles</name>
- <value>200</value>
- <description>
- If more than this number of StoreFiles exist in any one Store
- (one StoreFile is written per flush of MemStore), updates are blocked for
- this region until a compaction is completed, or until
- hbase.hstore.blockingWaitTime has been exceeded.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.hregion.memstore.flush.size</name>
- <value>134217728</value>
- <description>
- Memstore will be flushed to disk if size of the memstore exceeds this
- number of bytes. Value is checked by a thread that runs every
- hbase.server.thread.wakefrequency.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.snapshot.enabled</name>
- <value>false</value>
- <description>Enable/Disable HBase snapshots.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.replication</name>
- <value>false</value>
- <description>Enable/Disable HBase replication.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hbase.zookeeper.property.tickTime</name>
- <value>6000</value>
- <description>
- The length of a single tick, which is the basic time unit used by
- ZooKeeper, as measured in milliseconds. This property setting only
- affects the ZK server started by AMS in embedded mode. Unit = ms.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>zookeeper.session.timeout</name>
- <value>120000</value>
- <description>ZooKeeper session timeout in milliseconds.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>zookeeper.session.timeout.localHBaseCluster</name>
- <value>120000</value>
- <description>
- ZooKeeper session timeout in milliseconds for
- pseudo distributed mode.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>phoenix.sequence.saltBuckets</name>
- <value>2</value>
- <description>
- Controls the number of pre-allocated regions for SYSTEM.SEQUENCE table.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>phoenix.query.maxGlobalMemoryPercentage</name>
- <value>15</value>
- <description>
- Percentage of total heap memory (i.e. Runtime.getRuntime().maxMemory())
- that all threads may use.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>phoenix.spool.directory</name>
- <value>${hbase.tmp.dir}/phoenix-spool</value>
- <description>
- Set directory for Phoenix spill files. If possible set this to a
- different mount point from the one for hbase.rootdir in embedded mode.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>phoenix.mutate.batchSize</name>
- <value>10000</value>
- <description>
- The number of rows that are batched together and automatically committed
- during the execution of an UPSERT SELECT or DELETE statement.
- This affects performance of group by aggregators if they are being used.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>phoenix.query.rowKeyOrderSaltedTable</name>
- <value>true</value>
- <description>
- When set, we disallow user specified split points on salted table to ensure
- that each bucket will only contains entries with the same salt byte.
- When this property is turned on, the salted table would behave just like
- a normal table and would return items in rowkey order for scans
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>phoenix.coprocessor.maxServerCacheTimeToLiveMs</name>
- <value>60000</value>
- <description>
- Maximum living time (in milliseconds) of server caches. A cache entry
- expires after this amount of time has passed since last access. Consider
- adjusting this parameter when a server-side IOException(
- &#x201C;Could not find hash cache for joinId&#x201D;) happens. Getting warnings like
- &#x201C;Earlier hash cache(s) might have expired on servers&#x201D; might also be a
- sign that this number should be increased.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>phoenix.coprocessor.maxMetaDataCacheSize</name>
- <value>20480000</value>
- <description>
- Max size in bytes of total server-side metadata cache after which
- evictions will begin to occur based on least recent access time.
- Default is 20Mb
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.client.read.shortcircuit</name>
- <value>true</value>
- <description>Enable/Disable short circuit read for your client.
- Hadoop servers should be configured to allow short circuit read
- for the hbase user for this to take effect
- </description>
- <depends-on>
- <property>
- <type>ams-hbase-site</type>
- <name>hbase.cluster.distributed</name>
- </property>
- <property>
- <type>ams-hbase-site</type>
- <name>hbase.rootdir</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>zookeeper.znode.parent</name>
- <value>/ams-hbase-unsecure</value>
- <on-ambari-upgrade add="true"/>
- </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-log4j.xml
deleted file mode 100755
index 3735e6c62d..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-log4j.xml
+++ /dev/null
@@ -1,66 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
- ~ Licensed to the Apache Software Foundation (ASF) under one
- ~ or more contributor license agreements. See the NOTICE file
- ~ distributed with this work for additional information
- ~ regarding copyright ownership. The ASF licenses this file
- ~ to you under the Apache License, Version 2.0 (the
- ~ "License"); you may not use this file except in compliance
- ~ with the License. You may obtain a copy of the License at
- ~
- ~ http://www.apache.org/licenses/LICENSE-2.0
- ~
- ~ Unless required by applicable law or agreed to in writing, software
- ~ distributed under the License is distributed on an "AS IS" BASIS,
- ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ~ See the License for the specific language governing permissions and
- ~ limitations under the License.
- -->
-
-<configuration supports_final="false" supports_do_not_extend="true">
-
- <property>
- <name>content</name>
- <description>Custom log4j.properties</description>
- <value>
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Define some default values that can be overridden by system properties
-ams.log.dir=.
-ams.log.file=ambari-metrics-collector.log
-
-# Root logger option
-log4j.rootLogger=INFO,file
-
-# Direct log messages to a log file
-log4j.appender.file=org.apache.log4j.RollingFileAppender
-log4j.appender.file.File=${ams.log.dir}/${ams.log.file}
-log4j.appender.file.MaxFileSize=80MB
-log4j.appender.file.MaxBackupIndex=60
-log4j.appender.file.layout=org.apache.log4j.PatternLayout
-log4j.appender.file.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
- </value>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <show-property-name>false</show-property-name>
- </value-attributes>
- </property>
-
-</configuration>
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-logsearch-conf.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-logsearch-conf.xml
deleted file mode 100644
index 72d44dbd7c..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-logsearch-conf.xml
+++ /dev/null
@@ -1,201 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
- <property>
- <name>service_name</name>
- <display-name>Service name</display-name>
- <description>Service name for Logsearch Portal (label)</description>
- <value>AMS</value>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>component_mappings</name>
- <display-name>Component mapping</display-name>
- <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
- <value>METRICS_COLLECTOR:ams_collector,ams_hbase_master,ams_hbase_regionserver;METRICS_MONITOR:ams_monitor;METRICS_GRAFANA:ams_grafana</value>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>content</name>
- <display-name>Logfeeder config</display-name>
- <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
- <value>
-{
- "input":[
- {
- "type":"ams_hbase_master",
- "rowtype":"service",
- "path":"{{default('/configurations/ams-env/metrics_collector_log_dir', '/var/log/ambari-metrics-collector')}}/hbase-ams-master-*.log"
- },
- {
- "type":"ams_hbase_regionserver",
- "rowtype":"service",
- "path":"{{default('/configurations/ams-env/metrics_collector_log_dir', '/var/log/ambari-metrics-collector')}}/hbase-ams-regionserver-*.log"
- },
- {
- "type":"ams_collector",
- "rowtype":"service",
- "path":"{{default('/configurations/ams-env/metrics_collector_log_dir', '/var/log/ambari-metrics-collector')}}/ambari-metrics-collector.log"
- },
- {
- "type":"ams_monitor",
- "rowtype":"service",
- "path":"{{default('/configurations/ams-env/metrics_monitor_log_dir', '/var/log/ambari-metrics-monitor')}}/ambari-metrics-monitor.out"
- },
- {
- "type":"ams_grafana",
- "rowtype":"service",
- "path":"{{default('/configurations/ams-grafana-env/metrics_grafana_log_dir', '/var/log/ambari-metrics-grafana')}}/grafana.log"
- }
- ],
- "filter":[
- {
- "filter":"grok",
- "conditions":{
- "fields":{
- "type":[
- "ams_collector"
- ]
- }
- },
- "log4j_format":"%d{ISO8601} %p %c: %m%n",
- "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
- "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
- "post_map_values":{
- "logtime":{
- "map_date":{
- "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
- }
- }
- }
- },
- {
- "filter":"grok",
- "conditions":{
- "fields":{
- "type":[
- "ams_hbase_master",
- "ams_hbase_regionserver"
- ]
- }
- },
- "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
- "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
- "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
- "post_map_values":{
- "logtime":{
- "map_date":{
- "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
- }
- }
- }
- },
- {
- "filter":"grok",
- "conditions":{
- "fields":{
- "type":[
- "ams_grafana"
- ]
- }
- },
- "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
- "multiline_pattern":"^(%{DATESTAMP:logtime})",
- "message_pattern":"(?m)^%{DATESTAMP:logtime}%{SPACE}\\[%{WORD:level}\\]%{SPACE}%{GREEDYDATA:log_message}",
- "post_map_values":{
- "logtime":{
- "map_date":{
- "target_date_pattern":"yyyy/MM/dd HH:mm:ss"
- }
- },
- "level":[
- {
- "map_fieldvalue":{
- "pre_value":"I",
- "post_value":"INFO"
- }
- },
- {
- "map_fieldvalue":{
- "pre_value":"W",
- "post_value":"WARN"
- }
- },
- {
- "map_fieldvalue":{
- "pre_value":"D",
- "post_value":"DEBUG"
- }
- },
- {
- "map_fieldvalue":{
- "pre_value":"E",
- "post_value":"ERROR"
- }
- },
- {
- "map_fieldvalue":{
- "pre_value":"F",
- "post_value":"FATAL"
- }
- }
- ]
- }
- },
- {
- "filter":"grok",
- "conditions":{
- "fields":{
- "type":[
- "ams_monitor"
- ]
- }
- },
- "log4j_format":"",
- "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
- "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}\\[%{LOGLEVEL:level}\\]%{SPACE}%{JAVAFILE:file}:%{INT:line_number}%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
- "post_map_values":{
- "logtime":{
- "map_date":{
- "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
- }
- }
- },
- "level":[
- {
- "map_fieldvalue":{
- "pre_value":"WARNING",
- "post_value":"WARN"
- }
- }
- ]
- }
- ]
- }
- </value>
- <value-attributes>
- <type>content</type>
- <show-property-name>false</show-property-name>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-site.xml
deleted file mode 100755
index b9f534e66a..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-site.xml
+++ /dev/null
@@ -1,723 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
- <property>
- <name>timeline.metrics.service.operation.mode</name>
- <value>embedded</value>
- <display-name>Metrics Service operation mode</display-name>
- <description>
- Service Operation modes:
- 1) embedded: Metrics stored on local FS, HBase in Standalone mode
- 2) distributed: HBase daemons writing to HDFS
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.service.webapp.address</name>
- <value>0.0.0.0:6188</value>
- <description>
- The address of the metrics service web application.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.service.rpc.address</name>
- <value>0.0.0.0:60200</value>
- <description>
- The address of the metrics service rpc listeners.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.aggregator.checkpoint.dir</name>
- <value>/var/lib/ambari-metrics-collector/checkpoint</value>
- <display-name>Aggregator checkpoint directory</display-name>
- <description>
- Directory to store aggregator checkpoints. Change to a permanent
- location so that checkpoints are not lost.
- </description>
- <value-attributes>
- <type>directory</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.host.aggregator.minute.interval</name>
- <value>300</value>
- <display-name>Minute host aggregator interval</display-name>
- <description>
- Time in seconds to sleep for the minute resolution host based
- aggregator. Default resolution is 5 minutes.
- </description>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.host.aggregator.hourly.interval</name>
- <value>3600</value>
- <display-name>Hourly host aggregator interval</display-name>
- <description>
- Time in seconds to sleep for the hourly resolution host based
- aggregator. Default resolution is 1 hour.
- </description>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.daily.aggregator.minute.interval</name>
- <value>86400</value>
- <description>
- Time in seconds to sleep for the day resolution host based
- aggregator. Default resolution is 24 hours.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.hourly.interval</name>
- <value>3600</value>
- <display-name>Hourly cluster aggregator Interval</display-name>
- <description>
- Time in seconds to sleep for the hourly resolution cluster wide
- aggregator. Default is 1 hour.
- </description>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.daily.interval</name>
- <value>86400</value>
- <description>
- Time in seconds to sleep for the day resolution cluster wide
- aggregator. Default is 24 hours.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.minute.interval</name>
- <value>300</value>
- <display-name>Minute cluster aggregator interval</display-name>
- <description>
- Time in seconds to sleep for the minute resolution cluster wide
- aggregator. Default resolution is 5 minutes.
- </description>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.second.interval</name>
- <value>120</value>
- <display-name>Second cluster aggregator interval</display-name>
- <description>
- Time in seconds to sleep for the second resolution cluster wide
- aggregator. Default resolution is 2 minutes.
- </description>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier</name>
- <value>2</value>
- <description>
- Multiplier value * interval = Max allowed checkpoint lag. Effectively
- if aggregator checkpoint is greater than max allowed checkpoint delay,
- the checkpoint will be discarded by the aggregator.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.host.aggregator.hourly.checkpointCutOffMultiplier</name>
- <value>2</value>
- <display-name>Hourly host aggregator checkpoint cutOff multiplier</display-name>
- <description>
- Multiplier value * interval = Max allowed checkpoint lag. Effectively
- if aggregator checkpoint is greater than max allowed checkpoint delay,
- the checkpoint will be discarded by the aggregator.
- </description>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier</name>
- <value>2</value>
- <display-name>Minute host aggregator checkpoint cutOff multiplier</display-name>
- <description>
- Multiplier value * interval = Max allowed checkpoint lag. Effectively
- if aggregator checkpoint is greater than max allowed checkpoint delay,
- the checkpoint will be discarded by the aggregator.
- </description>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier</name>
- <value>2</value>
- <display-name>Hourly cluster aggregator checkpoint cutOff multiplier</display-name>
- <description>
- Multiplier value * interval = Max allowed checkpoint lag. Effectively
- if aggregator checkpoint is greater than max allowed checkpoint delay,
- the checkpoint will be discarded by the aggregator.
- </description>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.second.checkpointCutOffMultiplier</name>
- <value>2</value>
- <display-name>Second cluster aggregator checkpoint cutOff multiplier</display-name>
- <description>
- Multiplier value * interval = Max allowed checkpoint lag. Effectively
- if aggregator checkpoint is greater than max allowed checkpoint delay,
- the checkpoint will be discarded by the aggregator.
- </description>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier</name>
- <value>2</value>
- <display-name>Minute cluster aggregator checkpoint cutOff multiplier</display-name>
- <description>
- Multiplier value * interval = Max allowed checkpoint lag. Effectively
- if aggregator checkpoint is greater than max allowed checkpoint delay,
- the checkpoint will be discarded by the aggregator.
- </description>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier</name>
- <value>2</value>
- <description>
- Multiplier value * interval = Max allowed checkpoint lag. Effectively
- if aggregator checkpoint is greater than max allowed checkpoint delay,
- the checkpoint will be discarded by the aggregator.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.host.aggregator.daily.disabled</name>
- <value>false</value>
- <description>
- Disable host based daily aggregations.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.host.aggregator.hourly.disabled</name>
- <value>false</value>
- <display-name>Disable Hourly host aggregator</display-name>
- <description>
- Disable host based hourly aggregations.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.host.aggregator.minute.disabled</name>
- <value>false</value>
- <display-name>Disable Minute host aggregator</display-name>
- <description>
- Disable host based minute aggregations.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.daily.disabled</name>
- <value>false</value>
- <description>
- Disable cluster based daily aggregations.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.hourly.disabled</name>
- <display-name>Disable Hourly cluster aggregator</display-name>
- <value>false</value>
- <description>
- Disable cluster based hourly aggregations.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.minute.disabled</name>
- <value>false</value>
- <display-name>Disable minute cluster aggregator</display-name>
- <description>
- Disable cluster based minute aggregations.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.second.disabled</name>
- <value>false</value>
- <display-name>Disable second cluster aggregator</display-name>
- <description>
- Disable cluster based second aggregations.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.second.timeslice.interval</name>
- <value>30</value>
- <display-name>Second cluster aggregator timeslice interval</display-name>
- <description>
- Lowest resolution of desired data for cluster level second aggregates.
- </description>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.host.aggregator.daily.ttl</name>
- <value>31536000</value>
- <description>
- Host based daily resolution data purge interval in seconds. Default is 1 year.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.host.aggregator.hourly.ttl</name>
- <value>2592000</value>
- <description>
- Host based hourly resolution data purge interval in seconds. Default is 30 days.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.host.aggregator.minute.ttl</name>
- <value>604800</value>
- <description>
- Host based minute resolution data purge interval in seconds. Default is 7 days.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.second.ttl</name>
- <value>259200</value>
- <description>
- Cluster wide second resolution data purge interval in seconds. Default is 3 days.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.minute.ttl</name>
- <value>2592000</value>
- <description>
- Cluster wide minute resolution data purge interval in seconds. Default is 30 days.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.hourly.ttl</name>
- <value>31536000</value>
- <description>
- Cluster wide hourly resolution data purge interval in seconds. Default is 1 year.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.daily.ttl</name>
- <value>63072000</value>
- <description>
- Cluster wide daily resolution data purge interval in seconds. Default is 2 years.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.host.aggregator.ttl</name>
- <value>86400</value>
- <description>
- 1 minute resolution data purge interval in seconds. Default is 1 day.
- </description>
- <depends-on>
- <property>
- <type>ams-site</type>
- <name>timeline.metrics.service.operation.mode</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.hbase.data.block.encoding</name>
- <value>FAST_DIFF</value>
- <description>
- Codecs are enabled on a table by setting the DATA_BLOCK_ENCODING property.
- Default encoding is FAST_DIFF. This can be changed only before creating
- tables.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.hbase.compression.scheme</name>
- <value>SNAPPY</value>
- <description>
- Compression codes need to be installed and available before setting the
- scheme. Default compression is SNAPPY. Disable by setting to None.
- This can be changed only before creating tables.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.service.default.result.limit</name>
- <value>15840</value>
- <description>
- Max result limit on number of rows returned. Calculated as follows:
- 22 aggregate metrics/min * 2 * 60 * 6 : Retrieve 10 SECOND data for 2 hours.
- </description>
- <display-name>Metrics service default result limit</display-name>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.service.checkpointDelay</name>
- <value>60</value>
- <display-name>Metrics service checkpoint delay</display-name>
- <description>
- Time in seconds to sleep on the first run or when the checkpoint is
- too old.
- </description>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.service.resultset.fetchSize</name>
- <display-name>Metrics service resultset fetchSize</display-name>
- <value>2000</value>
- <description>
- JDBC resultset prefect size for aggregator queries.
- </description>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <!-- Phoenix properties that would manifest in the hbase-site.xml on the client side -->
- <property>
- <name>phoenix.query.maxGlobalMemoryPercentage</name>
- <value>25</value>
- <description>
- Percentage of total heap memory (i.e. Runtime.getRuntime().maxMemory())
- that all threads may use.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>phoenix.spool.directory</name>
- <value>/tmp</value>
- <description>
- Set directory for Phoenix spill files. If possible set this to a
- different mount point from the one for hbase.rootdir in embedded mode.
- </description>
- <value-attributes>
- <type>directory</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.service.cluster.aggregator.appIds</name>
- <value>datanode,nodemanager,hbase</value>
- <description>
- List of application ids to use for aggregating host level metrics for
- an application. Example: bytes_read across Yarn Nodemanagers.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.service.use.groupBy.aggregators</name>
- <value>true</value>
- <description>
- Use a groupBy aggregated query to perform host level aggregations vs
- in-memory aggregations.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.host.aggregate.splitpoints</name>
- <value> </value>
- <description>
- Pre-split regions using the split points corresponding to this property
- for the precision table that stores seconds aggregate data.
- </description>
- <depends-on>
- <property>
- <type>ams-hbase-site</type>
- <name>hbase.regionserver.global.memstore.upperLimit</name>
- </property>
- <property>
- <type>ams-hbase-site</type>
- <name>hbase.hregion.memstore.flush.size</name>
- </property>
- <property>
- <type>ams-hbase-env</type>
- <name>hbase_master_heapsize</name>
- </property>
- <property>
- <type>ams-hbase-env</type>
- <name>hbase_regionserver_heapsize</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregate.splitpoints</name>
- <value> </value>
- <description>
- Pre-split regions using the split points corresponding to this property
- for the aggregate table that stores seconds aggregate data across hosts.
- </description>
- <depends-on>
- <property>
- <type>ams-hbase-site</type>
- <name>hbase.regionserver.global.memstore.upperLimit</name>
- </property>
- <property>
- <type>ams-hbase-site</type>
- <name>hbase.hregion.memstore.flush.size</name>
- </property>
- <property>
- <type>ams-hbase-env</type>
- <name>hbase_master_heapsize</name>
- </property>
- <property>
- <type>ams-hbase-env</type>
- <name>hbase_regionserver_heapsize</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.sink.report.interval</name>
- <value>60</value>
- <description>
- Time in seconds to sleep before report metrics to collector.
- Default resolution is 1 minute.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.sink.collection.period</name>
- <value>10</value>
- <description>
- The interval between two service metrics data exports.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.service.watcher.disabled</name>
- <value>false</value>
- <description>
- Disable Timeline Metric Store watcher thread. Disabled by default in AMS distributed mode.
- </description>
- <depends-on>
- <property>
- <type>ams-site</type>
- <name>timeline.metrics.service.operation.mode</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.service.watcher.initial.delay</name>
- <value>600</value>
- <description>
- The time to delay first watcher check execution
- Default resolution is 10 minutes.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.service.watcher.delay</name>
- <value>30</value>
- <description>
- The delay between the termination of one
- watcher check execution and the commencement of the next
- Default resolution is 30 seconds.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.service.watcher.timeout</name>
- <value>30</value>
- <description>
- The maximum time to wait for a single watcher check execution
- Default resolution is 30 seconds.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.aggregators.skip.blockcache.enabled</name>
- <value>false</value>
- <description>
- Skip block cache on aggregator queries to allow, HBase block
- utilization only for user queries.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cache.commit.interval</name>
- <value>3</value>
- <description>
- Time in seconds between committing metrics from cache
- </description>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cache.size</name>
- <value>150</value>
- <description>
- Size of array blocking queue used to cache metrics
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cache.enabled</name>
- <value>true</value>
- <description>
- If set to true PhoenixHBaseAccessor will use cache to store metrics before committing them
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.service.http.policy</name>
- <value>HTTP_ONLY</value>
- <description>
- This configures the HTTP endpoint for Yarn Application History Server for
- Ambari Metrics System.
- The following values are supported:
- - HTTP_ONLY : Service is provided only on http
- - HTTPS_ONLY : Service is provided only on https
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>failover.strategy</name>
- <value>round-robin</value>
- <description>
- Failover strategy for metric monitors
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.hbase.init.check.enabled</name>
- <value>true</value>
- <description>
- Enable Initialization check for HBase tables during Metrics service startup.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregator.interpolation.enabled</name>
- <value>true</value>
- <description>
- Enable Linear interpolation for missing slices of data, while aggregating.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.service.metadata.filters</name>
- <value>ContainerResource</value>
- <description>
- Commas separated list of regular expressions that match metric names
- which prevents certain metrics from ending up in metadata cache.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.cluster.aggregation.sql.filters</name>
- <value>sdisk\_%,boottime</value>
- <description>
- Commas separated list of Metric names or Phoenix 'LIKE' class expressions that match metric names
- which prevents certain metrics from being aggregated across hosts.
- </description>
- <on-ambari-upgrade add="true"/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- </property>
- <property>
- <name>timeline.metrics.downsampler.topn.metric.patterns</name>
- <value>dfs.NNTopUserOpCounts.windowMs=60000.op=__%.user=%,dfs.NNTopUserOpCounts.windowMs=300000.op=__%.user=%,dfs.NNTopUserOpCounts.windowMs=1500000.op=__%.user=%</value>
- <description>
- Commas separated list of metric name regular expressions that are candidates for Top N downsampling.
- </description>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.downsampler.topn.value</name>
- <value>10</value>
- <description>
- Top N value for to be used for top N downsampling. Default is 10.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>timeline.metrics.downsampler.topn.function</name>
- <value>max</value>
- <description>
- Top N function for to be used for top N downsampling (avg/max/sum)
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>cluster.zookeeper.quorum</name>
- <value>{{cluster_zookeeper_quorum_hosts}}</value>
- <description>Comma separated list of servers in the cluster ZooKeeper Quorum.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>cluster.zookeeper.property.clientPort</name>
- <value>{{cluster_zookeeper_clientPort}}</value>
- <on-ambari-upgrade add="true"/>
- </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-ssl-client.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-ssl-client.xml
deleted file mode 100644
index cac39de43e..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-ssl-client.xml
+++ /dev/null
@@ -1,51 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
- <property>
- <name>ssl.client.truststore.location</name>
- <value>/etc/security/clientKeys/all.jks</value>
- <description>Location of the trust store file.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>ssl.client.truststore.type</name>
- <value>jks</value>
- <description>Optional. Default value is "jks".</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>ssl.client.truststore.password</name>
- <value>bigdata</value>
- <property-type>PASSWORD</property-type>
- <description>Password to open the trust store file.</description>
- <value-attributes>
- <type>password</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>ssl.client.truststore.alias</name>
- <value></value>
- <description>Alias used to create certificate for AMS. (Default is hostname)</description>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-ssl-server.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-ssl-server.xml
deleted file mode 100644
index 5d2745fc18..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/configuration/ams-ssl-server.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
- <property>
- <name>ssl.server.truststore.location</name>
- <value>/etc/security/serverKeys/all.jks</value>
- <description>Location of the trust store file.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>ssl.server.truststore.type</name>
- <value>jks</value>
- <description>Optional. Default value is "jks".</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>ssl.server.truststore.password</name>
- <value>bigdata</value>
- <property-type>PASSWORD</property-type>
- <description>Password to open the trust store file.</description>
- <value-attributes>
- <type>password</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>ssl.server.truststore.reload.interval</name>
- <value>10000</value>
- <description>Truststore reload interval, in milliseconds.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>ssl.server.keystore.type</name>
- <value>jks</value>
- <description>Optional. Default value is "jks".</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>ssl.server.keystore.location</name>
- <value>/etc/security/serverKeys/keystore.jks</value>
- <description>Location of the keystore file.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>ssl.server.keystore.password</name>
- <value>bigdata</value>
- <property-type>PASSWORD</property-type>
- <description>Password to open the keystore file.</description>
- <value-attributes>
- <type>password</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>ssl.server.keystore.keypassword</name>
- <value>bigdata</value>
- <property-type>PASSWORD</property-type>
- <description>Password for private key in keystore file.</description>
- <value-attributes>
- <type>password</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/kerberos.json
deleted file mode 100755
index 03c3f93239..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/kerberos.json
+++ /dev/null
@@ -1,122 +0,0 @@
-{
- "services": [
- {
- "name": "AMBARI_METRICS",
- "identities": [
- {
- "name": "/spnego"
- },
- {
- "name": "/hdfs"
- }
- ],
- "components": [
- {
- "name": "METRICS_COLLECTOR",
- "identities": [
- {
- "name": "ams_hbase_master_hbase",
- "principal": {
- "value": "amshbase/_HOST@${realm}",
- "type": "service",
- "configuration": "ams-hbase-security-site/hbase.master.kerberos.principal",
- "local_username": "${ams-env/ambari_metrics_user}"
- },
- "keytab": {
- "file": "${keytab_dir}/ams-hbase.master.keytab",
- "owner": {
- "name": "${ams-env/ambari_metrics_user}",
- "access": "r"
- },
- "group": {
- "name": "${cluster-env/user_group}",
- "access": ""
- },
- "configuration": "ams-hbase-security-site/hbase.master.keytab.file"
- }
- },
- {
- "name": "ams_hbase_regionserver_hbase",
- "principal": {
- "value": "amshbase/_HOST@${realm}",
- "type": "service",
- "configuration": "ams-hbase-security-site/hbase.regionserver.kerberos.principal",
- "local_username": "${ams-env/ambari_metrics_user}"
- },
- "keytab": {
- "file": "${keytab_dir}/ams-hbase.regionserver.keytab",
- "owner": {
- "name": "${ams-env/ambari_metrics_user}",
- "access": "r"
- },
- "group": {
- "name": "${cluster-env/user_group}",
- "access": ""
- },
- "configuration": "ams-hbase-security-site/hbase.regionserver.keytab.file"
- }
- },
- {
- "name": "ams_collector",
- "principal": {
- "value": "amshbase/_HOST@${realm}",
- "type": "service",
- "configuration": "ams-hbase-security-site/hbase.myclient.principal",
- "local_username": "${ams-env/ambari_metrics_user}"
- },
- "keytab": {
- "file": "${keytab_dir}/ams.collector.keytab",
- "owner": {
- "name": "${ams-env/ambari_metrics_user}",
- "access": "r"
- },
- "group": {
- "name": "${cluster-env/user_group}",
- "access": ""
- },
- "configuration": "ams-hbase-security-site/hbase.myclient.keytab"
- }
- },
- {
- "name": "ams_zookeeper",
- "principal": {
- "value": "zookeeper/_HOST@${realm}",
- "type": "service",
- "configuration": "ams-hbase-security-site/ams.zookeeper.principal",
- "local_username": "${ams-env/ambari_metrics_user}"
- },
- "keytab": {
- "file": "${keytab_dir}/zk.service.ams.keytab",
- "owner": {
- "name": "${ams-env/ambari_metrics_user}",
- "access": "r"
- },
- "group": {
- "name": "${cluster-env/user_group}",
- "access": ""
- },
- "configuration": "ams-hbase-security-site/ams.zookeeper.keytab"
- }
- }
- ],
- "configurations": [
- {
- "ams-hbase-security-site": {
- "hbase.security.authentication": "kerberos",
- "hbase.security.authorization": "true",
- "hadoop.security.authentication": "kerberos",
- "hbase.coprocessor.master.classes": "org.apache.hadoop.hbase.security.access.AccessController",
- "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.AccessController",
- "zookeeper.znode.parent": "/ams-hbase-secure",
- "hbase.zookeeper.property.kerberos.removeHostFromPrincipal": "true",
- "hbase.zookeeper.property.kerberos.removeRealmFromPrincipal": "true",
- "hbase.zookeeper.property.authProvider.1": "org.apache.zookeeper.server.auth.SASLAuthenticationProvider",
- "hbase.zookeeper.property.jaasLoginRenew": "3600000"
- }
- }
- ]
- }
- ]
- }
- ]
-}
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/metainfo.xml
index 5002459c5f..bcbeb15267 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/metainfo.xml
@@ -1,147 +1,27 @@
<?xml version="1.0"?>
<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing, software
+ ~ distributed under the License is distributed on an "AS IS" BASIS,
+ ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ~ See the License for the specific language governing permissions and
+ ~ limitations under the License.
+ -->
<metainfo>
<schemaVersion>2.0</schemaVersion>
<services>
<service>
<name>AMBARI_METRICS</name>
- <displayName>Ambari Metrics</displayName>
- <version>0.1.0</version>
- <comment>A system for metrics collection that provides storage and retrieval capability for metrics collected from the cluster
- </comment>
- <components>
- <component>
- <name>METRICS_COLLECTOR</name>
- <displayName>Metrics Collector</displayName>
- <category>MASTER</category>
- <cardinality>1</cardinality>
- <versionAdvertised>false</versionAdvertised>
- <timelineAppid>AMS-HBASE</timelineAppid>
- <dependencies>
- <dependency>
- <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
- <scope>cluster</scope>
- <auto-deploy>
- <enabled>true</enabled>
- </auto-deploy>
- </dependency>
- </dependencies>
- <commandScript>
- <script>scripts/metrics_collector.py</script>
- <scriptType>PYTHON</scriptType>
- <timeout>1200</timeout>
- </commandScript>
- </component>
- <component>
- <name>METRICS_MONITOR</name>
- <displayName>Metrics Monitor</displayName>
- <category>SLAVE</category>
- <cardinality>ALL</cardinality>
- <versionAdvertised>false</versionAdvertised>
- <auto-deploy>
- <enabled>true</enabled>
- </auto-deploy>
- <commandScript>
- <script>scripts/metrics_monitor.py</script>
- <scriptType>PYTHON</scriptType>
- <timeout>1200</timeout>
- </commandScript>
- </component>
- </components>
-
- <osSpecifics>
- <osSpecific>
- <osFamily>redhat7,redhat6,suse11</osFamily>
- <packages>
- <package>
- <name>ambari-metrics-collector</name>
- <skipUpgrade>true</skipUpgrade>
- <condition>should_install_ams_collector</condition>
- </package>
- <package>
- <name>ambari-metrics-monitor</name>
- <skipUpgrade>true</skipUpgrade>
- </package>
- <package>
- <name>ambari-metrics-hadoop-sink</name>
- <skipUpgrade>true</skipUpgrade>
- </package>
- <package>
- <name>gcc</name>
- </package>
- <package>
- <name>snappy</name>
- </package>
- </packages>
- </osSpecific>
- <osSpecific>
- <osFamily>debian7,ubuntu12,ubuntu14</osFamily>
- <packages>
- <package>
- <name>ambari-metrics-assembly</name>
- <skipUpgrade>true</skipUpgrade>
- </package>
- <package>
- <name>gcc</name>
- </package>
- </packages>
- </osSpecific>
- <osSpecific>
- <osFamily>winsrv6</osFamily>
- <packages>
- <package>
- <name>ambari-metrics-collector.msi</name>
- </package>
- <package>
- <name>ambari-metrics-monitor.msi</name>
- </package>
- <package>
- <name>ambari-metrics-hadoop-sink.msi</name>
- </package>
- </packages>
- </osSpecific>
- </osSpecifics>
-
- <commandScript>
- <script>scripts/service_check.py</script>
- <scriptType>PYTHON</scriptType>
- <timeout>600</timeout>
- </commandScript>
-
- <requiredServices>
- <service>ZOOKEEPER</service>
- </requiredServices>
-
- <configuration-dependencies>
- <config-type>ams-site</config-type>
- <config-type>ams-log4j</config-type>
- <config-type>ams-env</config-type>
- <config-type>ams-hbase-policy</config-type>
- <config-type>ams-hbase-site</config-type>
- <config-type>ams-hbase-security-site</config-type>
- <config-type>ams-hbase-env</config-type>
- <config-type>ams-hbase-log4j</config-type>
- </configuration-dependencies>
-
- <excluded-config-types>
- <config-type>storm-site</config-type>
- </excluded-config-types>
-
+ <extends>common-services/AMBARI_METRICS/0.1.0</extends>
</service>
</services>
-</metainfo>
+</metainfo> \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/metrics.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/metrics.json
deleted file mode 100755
index c12e09afa9..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/metrics.json
+++ /dev/null
@@ -1,2472 +0,0 @@
-{
- "METRICS_COLLECTOR": {
- "Component": [
- {
- "type": "ganglia",
- "metrics": {
- "default": {
- "metrics/hbase/ipc/ProcessCallTime_75th_percentile": {
- "metric": "ipc.IPC.ProcessCallTime_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/ProcessCallTime_95th_percentile": {
- "metric": "ipc.IPC.ProcessCallTime_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/ProcessCallTime_99th_percentile": {
- "metric": "ipc.IPC.ProcessCallTime_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/ProcessCallTime_max": {
- "metric": "ipc.IPC.ProcessCallTime_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/ProcessCallTime_mean": {
- "metric": "ipc.IPC.ProcessCallTime_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/ProcessCallTime_median": {
- "metric": "ipc.IPC.ProcessCallTime_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/ProcessCallTime_min": {
- "metric": "ipc.IPC.ProcessCallTime_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/ProcessCallTime_num_ops": {
- "metric": "ipc.IPC.ProcessCallTime_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/QueueCallTime_75th_percentile": {
- "metric": "ipc.IPC.QueueCallTime_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/QueueCallTime_95th_percentile": {
- "metric": "ipc.IPC.QueueCallTime_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/QueueCallTime_99th_percentile": {
- "metric": "ipc.IPC.QueueCallTime_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/QueueCallTime_max": {
- "metric": "ipc.IPC.QueueCallTime_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/QueueCallTime_mean": {
- "metric": "ipc.IPC.QueueCallTime_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/QueueCallTime_median": {
- "metric": "ipc.IPC.QueueCallTime_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/QueueCallTime_min": {
- "metric": "ipc.IPC.QueueCallTime_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/QueueCallTime_num_ops": {
- "metric": "ipc.IPC.QueueCallTime_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/authenticationFailures": {
- "metric": "ipc.IPC.authenticationFailures",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/authenticationSuccesses": {
- "metric": "ipc.IPC.authenticationSuccesses",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/authorizationFailures": {
- "metric": "ipc.IPC.authorizationFailures",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/authorizationSuccesses": {
- "metric": "ipc.IPC.authorizationSuccesses",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/numActiveHandler": {
- "metric": "ipc.IPC.numActiveHandler",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/numCallsInGeneralQueue": {
- "metric": "ipc.IPC.numCallsInGeneralQueue",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/numCallsInPriorityQueue": {
- "metric": "ipc.IPC.numCallsInPriorityQueue",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/numCallsInReplicationQueue": {
- "metric": "ipc.IPC.numCallsInReplicationQueue",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/numOpenConnections": {
- "metric": "ipc.IPC.numOpenConnections",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/queueSize": {
- "metric": "ipc.IPC.queueSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/receivedBytes": {
- "metric": "ipc.IPC.receivedBytes",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/sentBytes": {
- "metric": "ipc.IPC.sentBytes",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/GcCount": {
- "metric": "jvm.JvmMetrics.GcCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/GcCountConcurrentMarkSweep": {
- "metric": "jvm.JvmMetrics.GcCountConcurrentMarkSweep",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/GcCountParNew": {
- "metric": "jvm.JvmMetrics.GcCountParNew",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/GcTimeMillis": {
- "metric": "jvm.JvmMetrics.GcTimeMillis",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/GcTimeMillisConcurrentMarkSweep": {
- "metric": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/GcTimeMillisParNew": {
- "metric": "jvm.JvmMetrics.GcTimeMillisParNew",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/LogError": {
- "metric": "jvm.JvmMetrics.LogError",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/LogFatal": {
- "metric": "jvm.JvmMetrics.LogFatal",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/LogInfo": {
- "metric": "jvm.JvmMetrics.LogInfo",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/LogWarn": {
- "metric": "jvm.JvmMetrics.LogWarn",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/MemHeapCommittedM": {
- "metric": "jvm.JvmMetrics.MemHeapCommittedM",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/MemHeapMaxM": {
- "metric": "jvm.JvmMetrics.MemHeapMaxM",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/MemHeapUsedM": {
- "metric": "jvm.JvmMetrics.MemHeapUsedM",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/MemMaxM": {
- "metric": "jvm.JvmMetrics.MemMaxM",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/MemNonHeapCommittedM": {
- "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/MemNonHeapMaxM": {
- "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/MemNonHeapUsedM": {
- "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/ThreadsBlocked": {
- "metric": "jvm.JvmMetrics.ThreadsBlocked",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/ThreadsNew": {
- "metric": "jvm.JvmMetrics.ThreadsNew",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/ThreadsRunnable": {
- "metric": "jvm.JvmMetrics.ThreadsRunnable",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/ThreadsTerminated": {
- "metric": "jvm.JvmMetrics.ThreadsTerminated",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/ThreadsTimedWaiting": {
- "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/ThreadsWaiting": {
- "metric": "jvm.JvmMetrics.ThreadsWaiting",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/Assign_75th_percentile": {
- "metric": "master.AssignmentManger.Assign_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/Assign_95th_percentile": {
- "metric": "master.AssignmentManger.Assign_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/Assign_99th_percentile": {
- "metric": "master.AssignmentManger.Assign_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/Assign_max": {
- "metric": "master.AssignmentManger.Assign_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/Assign_mean": {
- "metric": "master.AssignmentManger.Assign_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/Assign_median": {
- "metric": "master.AssignmentManger.Assign_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/Assign_min": {
- "metric": "master.AssignmentManger.Assign_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/Assign_num_ops": {
- "metric": "master.AssignmentManger.Assign_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/BulkAssign_75th_percentile": {
- "metric": "master.AssignmentManger.BulkAssign_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/BulkAssign_95th_percentile": {
- "metric": "master.AssignmentManger.BulkAssign_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/BulkAssign_99th_percentile": {
- "metric": "master.AssignmentManger.BulkAssign_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/BulkAssign_max": {
- "metric": "master.AssignmentManger.BulkAssign_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/BulkAssign_mean": {
- "metric": "master.AssignmentManger.BulkAssign_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/BulkAssign_median": {
- "metric": "master.AssignmentManger.BulkAssign_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/BulkAssign_min": {
- "metric": "master.AssignmentManger.BulkAssign_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/BulkAssign_num_ops": {
- "metric": "master.AssignmentManger.BulkAssign_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/ritCount": {
- "metric": "master.AssignmentManger.ritCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/ritCountOverThreshold": {
- "metric": "master.AssignmentManger.ritCountOverThreshold",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/ritOldestAge": {
- "metric": "master.AssignmentManger.ritOldestAge",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/BalancerCluster_75th_percentile": {
- "metric": "master.Balancer.BalancerCluster_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/BalancerCluster_95th_percentile": {
- "metric": "master.Balancer.BalancerCluster_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/BalancerCluster_99th_percentile": {
- "metric": "master.Balancer.BalancerCluster_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/BalancerCluster_max": {
- "metric": "master.Balancer.BalancerCluster_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/BalancerCluster_mean": {
- "metric": "master.Balancer.BalancerCluster_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/BalancerCluster_median": {
- "metric": "master.Balancer.BalancerCluster_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/BalancerCluster_min": {
- "metric": "master.Balancer.BalancerCluster_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/BalancerCluster_num_ops": {
- "metric": "master.Balancer.BalancerCluster_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/miscInvocationCount": {
- "metric": "master.Balancer.miscInvocationCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitSize_75th_percentile": {
- "metric": "master.FileSystem.HlogSplitSize_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitSize_95th_percentile": {
- "metric": "master.FileSystem.HlogSplitSize_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitSize_99th_percentile": {
- "metric": "master.FileSystem.HlogSplitSize_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitSize_max": {
- "metric": "master.FileSystem.HlogSplitSize_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitSize_mean": {
- "metric": "master.FileSystem.HlogSplitSize_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitSize_median": {
- "metric": "master.FileSystem.HlogSplitSize_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitSize_min": {
- "metric": "master.FileSystem.HlogSplitSize_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitSize_num_ops": {
- "metric": "master.FileSystem.HlogSplitSize_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitTime_75th_percentile": {
- "metric": "master.FileSystem.HlogSplitTime_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitTime_95th_percentile": {
- "metric": "master.FileSystem.HlogSplitTime_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitTime_99th_percentile": {
- "metric": "master.FileSystem.HlogSplitTime_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitTime_max": {
- "metric": "master.FileSystem.HlogSplitTime_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitTime_mean": {
- "metric": "master.FileSystem.HlogSplitTime_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitTime_median": {
- "metric": "master.FileSystem.HlogSplitTime_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitTime_min": {
- "metric": "master.FileSystem.HlogSplitTime_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitTime_num_ops": {
- "metric": "master.FileSystem.HlogSplitTime_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitSize_75th_percentile": {
- "metric": "master.FileSystem.MetaHlogSplitSize_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitSize_95th_percentile": {
- "metric": "master.FileSystem.MetaHlogSplitSize_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitSize_99th_percentile": {
- "metric": "master.FileSystem.MetaHlogSplitSize_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitSize_max": {
- "metric": "master.FileSystem.MetaHlogSplitSize_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitSize_mean": {
- "metric": "master.FileSystem.MetaHlogSplitSize_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitSize_median": {
- "metric": "master.FileSystem.MetaHlogSplitSize_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitSize_min": {
- "metric": "master.FileSystem.MetaHlogSplitSize_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitSize_num_ops": {
- "metric": "master.FileSystem.MetaHlogSplitSize_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitTime_75th_percentile": {
- "metric": "master.FileSystem.MetaHlogSplitTime_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitTime_95th_percentile": {
- "metric": "master.FileSystem.MetaHlogSplitTime_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitTime_99th_percentile": {
- "metric": "master.FileSystem.MetaHlogSplitTime_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitTime_max": {
- "metric": "master.FileSystem.MetaHlogSplitTime_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitTime_mean": {
- "metric": "master.FileSystem.MetaHlogSplitTime_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitTime_median": {
- "metric": "master.FileSystem.MetaHlogSplitTime_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitTime_min": {
- "metric": "master.FileSystem.MetaHlogSplitTime_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitTime_num_ops": {
- "metric": "master.FileSystem.MetaHlogSplitTime_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/AverageLoad": {
- "metric": "master.Server.averageLoad",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/clusterRequests": {
- "metric": "master.Server.clusterRequests",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/masterActiveTime": {
- "metric": "master.Server.masterActiveTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/masterStartTime": {
- "metric": "master.Server.masterStartTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/numDeadRegionServers": {
- "metric": "master.Server.numDeadRegionServers",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/numRegionServers": {
- "metric": "master.Server.numRegionServers",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/DroppedPubAll": {
- "metric": "metricssystem.MetricsSystem.DroppedPubAll",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/NumActiveSinks": {
- "metric": "metricssystem.MetricsSystem.NumActiveSinks",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/NumActiveSources": {
- "metric": "metricssystem.MetricsSystem.NumActiveSources",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/NumAllSinks": {
- "metric": "metricssystem.MetricsSystem.NumAllSinks",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/NumAllSources": {
- "metric": "metricssystem.MetricsSystem.NumAllSources",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/PublishAvgTime": {
- "metric": "metricssystem.MetricsSystem.PublishAvgTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/PublishNumOps": {
- "metric": "metricssystem.MetricsSystem.PublishNumOps",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/Sink_timelineAvgTime": {
- "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/Sink_timelineDropped": {
- "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/Sink_timelineNumOps": {
- "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/Sink_timelineQsize": {
- "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/SnapshotAvgTime": {
- "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/SnapshotNumOps": {
- "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Append_75th_percentile": {
- "metric": "regionserver.Server.Append_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Append_95th_percentile": {
- "metric": "regionserver.Server.Append_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Append_99th_percentile": {
- "metric": "regionserver.Server.Append_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Append_max": {
- "metric": "regionserver.Server.Append_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Append_mean": {
- "metric": "regionserver.Server.Append_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Append_median": {
- "metric": "regionserver.Server.Append_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Append_min": {
- "metric": "regionserver.Server.Append_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Append_num_ops": {
- "metric": "regionserver.Server.Append_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Delete_75th_percentile": {
- "metric": "regionserver.Server.Delete_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Delete_95th_percentile": {
- "metric": "regionserver.Server.Delete_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Delete_99th_percentile": {
- "metric": "regionserver.Server.Delete_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Delete_max": {
- "metric": "regionserver.Server.Delete_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Delete_mean": {
- "metric": "regionserver.Server.Delete_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Delete_median": {
- "metric": "regionserver.Server.Delete_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Delete_min": {
- "metric": "regionserver.Server.Delete_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Delete_num_ops": {
- "metric": "regionserver.Server.Delete_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Get_75th_percentile": {
- "metric": "regionserver.Server.Get_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Get_95th_percentile": {
- "metric": "regionserver.Server.Get_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Get_99th_percentile": {
- "metric": "regionserver.Server.Get_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Get_max": {
- "metric": "regionserver.Server.Get_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Get_mean": {
- "metric": "regionserver.Server.Get_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Get_median": {
- "metric": "regionserver.Server.Get_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Get_min": {
- "metric": "regionserver.Server.Get_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Get_num_ops": {
- "metric": "regionserver.Server.Get_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Increment_75th_percentile": {
- "metric": "regionserver.Server.Increment_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Increment_95th_percentile": {
- "metric": "regionserver.Server.Increment_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Increment_99th_percentile": {
- "metric": "regionserver.Server.Increment_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Increment_max": {
- "metric": "regionserver.Server.Increment_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Increment_mean": {
- "metric": "regionserver.Server.Increment_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Increment_median": {
- "metric": "regionserver.Server.Increment_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Increment_min": {
- "metric": "regionserver.Server.Increment_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Increment_num_ops": {
- "metric": "regionserver.Server.Increment_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Mutate_75th_percentile": {
- "metric": "regionserver.Server.Mutate_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Mutate_95th_percentile": {
- "metric": "regionserver.Server.Mutate_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Mutate_99th_percentile": {
- "metric": "regionserver.Server.Mutate_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Mutate_max": {
- "metric": "regionserver.Server.Mutate_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Mutate_mean": {
- "metric": "regionserver.Server.Mutate_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Mutate_median": {
- "metric": "regionserver.Server.Mutate_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Mutate_min": {
- "metric": "regionserver.Server.Mutate_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Mutate_num_ops": {
- "metric": "regionserver.Server.Mutate_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Replay_75th_percentile": {
- "metric": "regionserver.Server.Replay_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Replay_95th_percentile": {
- "metric": "regionserver.Server.Replay_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Replay_99th_percentile": {
- "metric": "regionserver.Server.Replay_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Replay_max": {
- "metric": "regionserver.Server.Replay_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Replay_mean": {
- "metric": "regionserver.Server.Replay_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Replay_median": {
- "metric": "regionserver.Server.Replay_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Replay_min": {
- "metric": "regionserver.Server.Replay_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Replay_num_ops": {
- "metric": "regionserver.Server.Replay_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/blockCacheCount": {
- "metric": "regionserver.Server.blockCacheCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/blockCacheEvictionCount": {
- "metric": "regionserver.Server.blockCacheEvictionCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/blockCacheExpressHitPercent": {
- "metric": "regionserver.Server.blockCacheExpressHitPercent",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/blockCacheFreeSize": {
- "metric": "regionserver.Server.blockCacheFreeSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/blockCacheHitPercent": {
- "metric": "regionserver.Server.blockCacheCountHitPercent",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/blockCacheHitCount": {
- "metric": "regionserver.Server.blockCacheHitCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/blockCacheMissCount": {
- "metric": "regionserver.Server.blockCacheMissCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/blockCacheSize": {
- "metric": "regionserver.Server.blockCacheSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/checkMutateFailedCount": {
- "metric": "regionserver.Server.checkMutateFailedCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/checkMutatePassedCount": {
- "metric": "regionserver.Server.checkMutatePassedCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/compactionQueueSize": {
- "metric": "regionserver.Server.compactionQueueLength",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/flushQueueLength": {
- "metric": "regionserver.Server.flushQueueLength",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/hlogFileCount": {
- "metric": "regionserver.Server.hlogFileCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/hlogFileSize": {
- "metric": "regionserver.Server.hlogFileSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/memStoreSize": {
- "metric": "regionserver.Server.memStoreSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/mutationsWithoutWALCount": {
- "metric": "regionserver.Server.mutationsWithoutWALCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/mutationsWithoutWALSize": {
- "metric": "regionserver.Server.mutationsWithoutWALSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/percentFilesLocal": {
- "metric": "regionserver.Server.percentFilesLocal",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/readRequestCount": {
- "metric": "regionserver.Server.readRequestCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/regions": {
- "metric": "regionserver.Server.regionCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/regionServerStartTime": {
- "metric": "regionserver.Server.regionServerStartTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/slowAppendCount": {
- "metric": "regionserver.Server.slowAppendCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/slowDeleteCount": {
- "metric": "regionserver.Server.slowDeleteCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/slowGetCount": {
- "metric": "regionserver.Server.slowGetCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/slowIncrementCount": {
- "metric": "regionserver.Server.slowIncrementCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/slowPutCount": {
- "metric": "regionserver.Server.slowPutCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/staticBloomSize": {
- "metric": "regionserver.Server.staticBloomSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/staticIndexSize": {
- "metric": "regionserver.Server.staticIndexSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/storeCount": {
- "metric": "regionserver.Server.storeCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/storefiles": {
- "metric": "regionserver.Server.storeFileCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/storeFileIndexSize": {
- "metric": "regionserver.Server.storeFileIndexSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/storeFileSize": {
- "metric": "regionserver.Server.storeFileSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/requests": {
- "metric": "regionserver.Server.totalRequestCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/updatesBlockedTime": {
- "metric": "regionserver.Server.updatesBlockedTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/writeRequestCount": {
- "metric": "regionserver.Server.writeRequestCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendSize_75th_percentile": {
- "metric": "regionserver.WAL.AppendSize_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendSize_95th_percentile": {
- "metric": "regionserver.WAL.AppendSize_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendSize_99th_percentile": {
- "metric": "regionserver.WAL.AppendSize_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendSize_max": {
- "metric": "regionserver.WAL.AppendSize_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendSize_mean": {
- "metric": "regionserver.WAL.AppendSize_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendSize_median": {
- "metric": "regionserver.WAL.AppendSize_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendSize_min": {
- "metric": "regionserver.WAL.AppendSize_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendSize_num_ops": {
- "metric": "regionserver.WAL.AppendSize_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendTime_75th_percentile": {
- "metric": "regionserver.WAL.AppendTime_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendTime_95th_percentile": {
- "metric": "regionserver.WAL.AppendTime_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendTime_99th_percentile": {
- "metric": "regionserver.WAL.AppendTime_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendTime_max": {
- "metric": "regionserver.WAL.AppendTime_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendTime_mean": {
- "metric": "regionserver.WAL.AppendTime_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendTime_median": {
- "metric": "regionserver.WAL.AppendTime_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendTime_min": {
- "metric": "regionserver.WAL.AppendTime_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendTime_num_ops": {
- "metric": "regionserver.WAL.AppendTime_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/SyncTime_75th_percentile": {
- "metric": "regionserver.WAL.SyncTime_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/SyncTime_95th_percentile": {
- "metric": "regionserver.WAL.SyncTime_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/SyncTime_99th_percentile": {
- "metric": "regionserver.WAL.SyncTime_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/SyncTime_max": {
- "metric": "regionserver.WAL.SyncTime_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/SyncTime_mean": {
- "metric": "regionserver.WAL.SyncTime_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/SyncTime_median": {
- "metric": "regionserver.WAL.SyncTime_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/SyncTime_min": {
- "metric": "regionserver.WAL.SyncTime_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/SyncTime_num_ops": {
- "metric": "regionserver.WAL.SyncTime_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/appendCount": {
- "metric": "regionserver.WAL.appendCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/slowAppendCount": {
- "metric": "regionserver.WAL.slowAppendCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ugi/GetGroupsAvgTime": {
- "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ugi/GetGroupsNumOps": {
- "metric": "ugi.UgiMetrics.GetGroupsNumOps",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ugi/LoginFailureAvgTime": {
- "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ugi/LoginFailureNumOps": {
- "metric": "ugi.UgiMetrics.LoginFailureNumOps",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ugi/LoginSuccessAvgTime": {
- "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ugi/LoginSuccessNumOps": {
- "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
- "pointInTime": true,
- "temporal": true
- }
- }
- }
- }
- ],
- "HostComponent": [
- {
- "type": "ganglia",
- "metrics": {
- "default": {
- "metrics/hbase/ipc/ProcessCallTime_75th_percentile": {
- "metric": "ipc.IPC.ProcessCallTime_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/ProcessCallTime_95th_percentile": {
- "metric": "ipc.IPC.ProcessCallTime_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/ProcessCallTime_99th_percentile": {
- "metric": "ipc.IPC.ProcessCallTime_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/ProcessCallTime_max": {
- "metric": "ipc.IPC.ProcessCallTime_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/ProcessCallTime_mean": {
- "metric": "ipc.IPC.ProcessCallTime_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/ProcessCallTime_median": {
- "metric": "ipc.IPC.ProcessCallTime_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/ProcessCallTime_min": {
- "metric": "ipc.IPC.ProcessCallTime_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/ProcessCallTime_num_ops": {
- "metric": "ipc.IPC.ProcessCallTime_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/QueueCallTime_75th_percentile": {
- "metric": "ipc.IPC.QueueCallTime_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/QueueCallTime_95th_percentile": {
- "metric": "ipc.IPC.QueueCallTime_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/QueueCallTime_99th_percentile": {
- "metric": "ipc.IPC.QueueCallTime_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/QueueCallTime_max": {
- "metric": "ipc.IPC.QueueCallTime_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/QueueCallTime_mean": {
- "metric": "ipc.IPC.QueueCallTime_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/QueueCallTime_median": {
- "metric": "ipc.IPC.QueueCallTime_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/QueueCallTime_min": {
- "metric": "ipc.IPC.QueueCallTime_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/QueueCallTime_num_ops": {
- "metric": "ipc.IPC.QueueCallTime_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/authenticationFailures": {
- "metric": "ipc.IPC.authenticationFailures",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/authenticationSuccesses": {
- "metric": "ipc.IPC.authenticationSuccesses",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/authorizationFailures": {
- "metric": "ipc.IPC.authorizationFailures",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/authorizationSuccesses": {
- "metric": "ipc.IPC.authorizationSuccesses",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/numActiveHandler": {
- "metric": "ipc.IPC.numActiveHandler",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/numCallsInGeneralQueue": {
- "metric": "ipc.IPC.numCallsInGeneralQueue",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/numCallsInPriorityQueue": {
- "metric": "ipc.IPC.numCallsInPriorityQueue",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/numCallsInReplicationQueue": {
- "metric": "ipc.IPC.numCallsInReplicationQueue",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/numOpenConnections": {
- "metric": "ipc.IPC.numOpenConnections",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/queueSize": {
- "metric": "ipc.IPC.queueSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/receivedBytes": {
- "metric": "ipc.IPC.receivedBytes",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ipc/sentBytes": {
- "metric": "ipc.IPC.sentBytes",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/GcCount": {
- "metric": "jvm.JvmMetrics.GcCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/GcCountConcurrentMarkSweep": {
- "metric": "jvm.JvmMetrics.GcCountConcurrentMarkSweep",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/GcCountParNew": {
- "metric": "jvm.JvmMetrics.GcCountParNew",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/GcTimeMillis": {
- "metric": "jvm.JvmMetrics.GcTimeMillis",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/GcTimeMillisConcurrentMarkSweep": {
- "metric": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/GcTimeMillisParNew": {
- "metric": "jvm.JvmMetrics.GcTimeMillisParNew",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/LogError": {
- "metric": "jvm.JvmMetrics.LogError",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/LogFatal": {
- "metric": "jvm.JvmMetrics.LogFatal",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/LogInfo": {
- "metric": "jvm.JvmMetrics.LogInfo",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/LogWarn": {
- "metric": "jvm.JvmMetrics.LogWarn",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/MemHeapCommittedM": {
- "metric": "jvm.JvmMetrics.MemHeapCommittedM",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/MemHeapMaxM": {
- "metric": "jvm.JvmMetrics.MemHeapMaxM",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/MemHeapUsedM": {
- "metric": "jvm.JvmMetrics.MemHeapUsedM",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/MemMaxM": {
- "metric": "jvm.JvmMetrics.MemMaxM",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/MemNonHeapCommittedM": {
- "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/MemNonHeapMaxM": {
- "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/MemNonHeapUsedM": {
- "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/ThreadsBlocked": {
- "metric": "jvm.JvmMetrics.ThreadsBlocked",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/ThreadsNew": {
- "metric": "jvm.JvmMetrics.ThreadsNew",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/ThreadsRunnable": {
- "metric": "jvm.JvmMetrics.ThreadsRunnable",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/ThreadsTerminated": {
- "metric": "jvm.JvmMetrics.ThreadsTerminated",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/ThreadsTimedWaiting": {
- "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/jvm/ThreadsWaiting": {
- "metric": "jvm.JvmMetrics.ThreadsWaiting",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/Assign_75th_percentile": {
- "metric": "master.AssignmentManger.Assign_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/Assign_95th_percentile": {
- "metric": "master.AssignmentManger.Assign_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/Assign_99th_percentile": {
- "metric": "master.AssignmentManger.Assign_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/Assign_max": {
- "metric": "master.AssignmentManger.Assign_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/Assign_mean": {
- "metric": "master.AssignmentManger.Assign_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/Assign_median": {
- "metric": "master.AssignmentManger.Assign_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/Assign_min": {
- "metric": "master.AssignmentManger.Assign_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/Assign_num_ops": {
- "metric": "master.AssignmentManger.Assign_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/BulkAssign_75th_percentile": {
- "metric": "master.AssignmentManger.BulkAssign_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/BulkAssign_95th_percentile": {
- "metric": "master.AssignmentManger.BulkAssign_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/BulkAssign_99th_percentile": {
- "metric": "master.AssignmentManger.BulkAssign_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/BulkAssign_max": {
- "metric": "master.AssignmentManger.BulkAssign_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/BulkAssign_mean": {
- "metric": "master.AssignmentManger.BulkAssign_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/BulkAssign_median": {
- "metric": "master.AssignmentManger.BulkAssign_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/BulkAssign_min": {
- "metric": "master.AssignmentManger.BulkAssign_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/BulkAssign_num_ops": {
- "metric": "master.AssignmentManger.BulkAssign_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/ritCount": {
- "metric": "master.AssignmentManger.ritCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/ritCountOverThreshold": {
- "metric": "master.AssignmentManger.ritCountOverThreshold",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/assignmentManger/ritOldestAge": {
- "metric": "master.AssignmentManger.ritOldestAge",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/BalancerCluster_75th_percentile": {
- "metric": "master.Balancer.BalancerCluster_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/BalancerCluster_95th_percentile": {
- "metric": "master.Balancer.BalancerCluster_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/BalancerCluster_99th_percentile": {
- "metric": "master.Balancer.BalancerCluster_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/BalancerCluster_max": {
- "metric": "master.Balancer.BalancerCluster_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/BalancerCluster_mean": {
- "metric": "master.Balancer.BalancerCluster_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/BalancerCluster_median": {
- "metric": "master.Balancer.BalancerCluster_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/BalancerCluster_min": {
- "metric": "master.Balancer.BalancerCluster_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/BalancerCluster_num_ops": {
- "metric": "master.Balancer.BalancerCluster_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/balancer/miscInvocationCount": {
- "metric": "master.Balancer.miscInvocationCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitSize_75th_percentile": {
- "metric": "master.FileSystem.HlogSplitSize_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitSize_95th_percentile": {
- "metric": "master.FileSystem.HlogSplitSize_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitSize_99th_percentile": {
- "metric": "master.FileSystem.HlogSplitSize_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitSize_max": {
- "metric": "master.FileSystem.HlogSplitSize_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitSize_mean": {
- "metric": "master.FileSystem.HlogSplitSize_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitSize_median": {
- "metric": "master.FileSystem.HlogSplitSize_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitSize_min": {
- "metric": "master.FileSystem.HlogSplitSize_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitSize_num_ops": {
- "metric": "master.FileSystem.HlogSplitSize_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitTime_75th_percentile": {
- "metric": "master.FileSystem.HlogSplitTime_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitTime_95th_percentile": {
- "metric": "master.FileSystem.HlogSplitTime_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitTime_99th_percentile": {
- "metric": "master.FileSystem.HlogSplitTime_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitTime_max": {
- "metric": "master.FileSystem.HlogSplitTime_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitTime_mean": {
- "metric": "master.FileSystem.HlogSplitTime_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitTime_median": {
- "metric": "master.FileSystem.HlogSplitTime_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitTime_min": {
- "metric": "master.FileSystem.HlogSplitTime_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/HlogSplitTime_num_ops": {
- "metric": "master.FileSystem.HlogSplitTime_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitSize_75th_percentile": {
- "metric": "master.FileSystem.MetaHlogSplitSize_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitSize_95th_percentile": {
- "metric": "master.FileSystem.MetaHlogSplitSize_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitSize_99th_percentile": {
- "metric": "master.FileSystem.MetaHlogSplitSize_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitSize_max": {
- "metric": "master.FileSystem.MetaHlogSplitSize_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitSize_mean": {
- "metric": "master.FileSystem.MetaHlogSplitSize_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitSize_median": {
- "metric": "master.FileSystem.MetaHlogSplitSize_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitSize_min": {
- "metric": "master.FileSystem.MetaHlogSplitSize_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitSize_num_ops": {
- "metric": "master.FileSystem.MetaHlogSplitSize_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitTime_75th_percentile": {
- "metric": "master.FileSystem.MetaHlogSplitTime_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitTime_95th_percentile": {
- "metric": "master.FileSystem.MetaHlogSplitTime_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitTime_99th_percentile": {
- "metric": "master.FileSystem.MetaHlogSplitTime_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitTime_max": {
- "metric": "master.FileSystem.MetaHlogSplitTime_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitTime_mean": {
- "metric": "master.FileSystem.MetaHlogSplitTime_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitTime_median": {
- "metric": "master.FileSystem.MetaHlogSplitTime_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitTime_min": {
- "metric": "master.FileSystem.MetaHlogSplitTime_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/fileSystem/MetaHlogSplitTime_num_ops": {
- "metric": "master.FileSystem.MetaHlogSplitTime_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/AverageLoad": {
- "metric": "master.Server.averageLoad",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/clusterRequests": {
- "metric": "master.Server.clusterRequests",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/masterActiveTime": {
- "metric": "master.Server.masterActiveTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/masterStartTime": {
- "metric": "master.Server.masterStartTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/numDeadRegionServers": {
- "metric": "master.Server.numDeadRegionServers",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/master/numRegionServers": {
- "metric": "master.Server.numRegionServers",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/DroppedPubAll": {
- "metric": "metricssystem.MetricsSystem.DroppedPubAll",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/NumActiveSinks": {
- "metric": "metricssystem.MetricsSystem.NumActiveSinks",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/NumActiveSources": {
- "metric": "metricssystem.MetricsSystem.NumActiveSources",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/NumAllSinks": {
- "metric": "metricssystem.MetricsSystem.NumAllSinks",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/NumAllSources": {
- "metric": "metricssystem.MetricsSystem.NumAllSources",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/PublishAvgTime": {
- "metric": "metricssystem.MetricsSystem.PublishAvgTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/PublishNumOps": {
- "metric": "metricssystem.MetricsSystem.PublishNumOps",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/Sink_timelineAvgTime": {
- "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/Sink_timelineDropped": {
- "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/Sink_timelineNumOps": {
- "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/Sink_timelineQsize": {
- "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/SnapshotAvgTime": {
- "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/metricssystem/SnapshotNumOps": {
- "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Append_75th_percentile": {
- "metric": "regionserver.Server.Append_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Append_95th_percentile": {
- "metric": "regionserver.Server.Append_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Append_99th_percentile": {
- "metric": "regionserver.Server.Append_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Append_max": {
- "metric": "regionserver.Server.Append_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Append_mean": {
- "metric": "regionserver.Server.Append_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Append_median": {
- "metric": "regionserver.Server.Append_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Append_min": {
- "metric": "regionserver.Server.Append_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Append_num_ops": {
- "metric": "regionserver.Server.Append_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Delete_75th_percentile": {
- "metric": "regionserver.Server.Delete_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Delete_95th_percentile": {
- "metric": "regionserver.Server.Delete_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Delete_99th_percentile": {
- "metric": "regionserver.Server.Delete_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Delete_max": {
- "metric": "regionserver.Server.Delete_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Delete_mean": {
- "metric": "regionserver.Server.Delete_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Delete_median": {
- "metric": "regionserver.Server.Delete_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Delete_min": {
- "metric": "regionserver.Server.Delete_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Delete_num_ops": {
- "metric": "regionserver.Server.Delete_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Get_75th_percentile": {
- "metric": "regionserver.Server.Get_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Get_95th_percentile": {
- "metric": "regionserver.Server.Get_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Get_99th_percentile": {
- "metric": "regionserver.Server.Get_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Get_max": {
- "metric": "regionserver.Server.Get_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Get_mean": {
- "metric": "regionserver.Server.Get_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Get_median": {
- "metric": "regionserver.Server.Get_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Get_min": {
- "metric": "regionserver.Server.Get_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Get_num_ops": {
- "metric": "regionserver.Server.Get_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Increment_75th_percentile": {
- "metric": "regionserver.Server.Increment_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Increment_95th_percentile": {
- "metric": "regionserver.Server.Increment_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Increment_99th_percentile": {
- "metric": "regionserver.Server.Increment_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Increment_max": {
- "metric": "regionserver.Server.Increment_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Increment_mean": {
- "metric": "regionserver.Server.Increment_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Increment_median": {
- "metric": "regionserver.Server.Increment_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Increment_min": {
- "metric": "regionserver.Server.Increment_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Increment_num_ops": {
- "metric": "regionserver.Server.Increment_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Mutate_75th_percentile": {
- "metric": "regionserver.Server.Mutate_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Mutate_95th_percentile": {
- "metric": "regionserver.Server.Mutate_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Mutate_99th_percentile": {
- "metric": "regionserver.Server.Mutate_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Mutate_max": {
- "metric": "regionserver.Server.Mutate_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Mutate_mean": {
- "metric": "regionserver.Server.Mutate_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Mutate_median": {
- "metric": "regionserver.Server.Mutate_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Mutate_min": {
- "metric": "regionserver.Server.Mutate_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Mutate_num_ops": {
- "metric": "regionserver.Server.Mutate_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Replay_75th_percentile": {
- "metric": "regionserver.Server.Replay_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Replay_95th_percentile": {
- "metric": "regionserver.Server.Replay_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Replay_99th_percentile": {
- "metric": "regionserver.Server.Replay_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Replay_max": {
- "metric": "regionserver.Server.Replay_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Replay_mean": {
- "metric": "regionserver.Server.Replay_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Replay_median": {
- "metric": "regionserver.Server.Replay_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Replay_min": {
- "metric": "regionserver.Server.Replay_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/Replay_num_ops": {
- "metric": "regionserver.Server.Replay_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/blockCacheCount": {
- "metric": "regionserver.Server.blockCacheCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/blockCacheEvictionCount": {
- "metric": "regionserver.Server.blockCacheEvictionCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/blockCacheExpressHitPercent": {
- "metric": "regionserver.Server.blockCacheExpressHitPercent",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/blockCacheFreeSize": {
- "metric": "regionserver.Server.blockCacheFreeSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/blockCacheHitPercent": {
- "metric": "regionserver.Server.blockCacheCountHitPercent",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/blockCacheHitCount": {
- "metric": "regionserver.Server.blockCacheHitCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/blockCacheMissCount": {
- "metric": "regionserver.Server.blockCacheMissCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/blockCacheSize": {
- "metric": "regionserver.Server.blockCacheSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/checkMutateFailedCount": {
- "metric": "regionserver.Server.checkMutateFailedCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/checkMutatePassedCount": {
- "metric": "regionserver.Server.checkMutatePassedCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/compactionQueueSize": {
- "metric": "regionserver.Server.compactionQueueLength",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/flushQueueLength": {
- "metric": "regionserver.Server.flushQueueLength",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/hlogFileCount": {
- "metric": "regionserver.Server.hlogFileCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/hlogFileSize": {
- "metric": "regionserver.Server.hlogFileSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/memStoreSize": {
- "metric": "regionserver.Server.memStoreSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/mutationsWithoutWALCount": {
- "metric": "regionserver.Server.mutationsWithoutWALCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/mutationsWithoutWALSize": {
- "metric": "regionserver.Server.mutationsWithoutWALSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/percentFilesLocal": {
- "metric": "regionserver.Server.percentFilesLocal",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/readRequestCount": {
- "metric": "regionserver.Server.readRequestCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/regions": {
- "metric": "regionserver.Server.regionCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/regionServerStartTime": {
- "metric": "regionserver.Server.regionServerStartTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/slowAppendCount": {
- "metric": "regionserver.Server.slowAppendCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/slowDeleteCount": {
- "metric": "regionserver.Server.slowDeleteCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/slowGetCount": {
- "metric": "regionserver.Server.slowGetCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/slowIncrementCount": {
- "metric": "regionserver.Server.slowIncrementCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/slowPutCount": {
- "metric": "regionserver.Server.slowPutCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/staticBloomSize": {
- "metric": "regionserver.Server.staticBloomSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/staticIndexSize": {
- "metric": "regionserver.Server.staticIndexSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/storeCount": {
- "metric": "regionserver.Server.storeCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/storefiles": {
- "metric": "regionserver.Server.storeFileCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/storeFileIndexSize": {
- "metric": "regionserver.Server.storeFileIndexSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/storeFileSize": {
- "metric": "regionserver.Server.storeFileSize",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/requests": {
- "metric": "regionserver.Server.totalRequestCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/updatesBlockedTime": {
- "metric": "regionserver.Server.updatesBlockedTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/writeRequestCount": {
- "metric": "regionserver.Server.writeRequestCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendSize_75th_percentile": {
- "metric": "regionserver.WAL.AppendSize_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendSize_95th_percentile": {
- "metric": "regionserver.WAL.AppendSize_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendSize_99th_percentile": {
- "metric": "regionserver.WAL.AppendSize_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendSize_max": {
- "metric": "regionserver.WAL.AppendSize_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendSize_mean": {
- "metric": "regionserver.WAL.AppendSize_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendSize_median": {
- "metric": "regionserver.WAL.AppendSize_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendSize_min": {
- "metric": "regionserver.WAL.AppendSize_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendSize_num_ops": {
- "metric": "regionserver.WAL.AppendSize_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendTime_75th_percentile": {
- "metric": "regionserver.WAL.AppendTime_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendTime_95th_percentile": {
- "metric": "regionserver.WAL.AppendTime_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendTime_99th_percentile": {
- "metric": "regionserver.WAL.AppendTime_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendTime_max": {
- "metric": "regionserver.WAL.AppendTime_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendTime_mean": {
- "metric": "regionserver.WAL.AppendTime_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendTime_median": {
- "metric": "regionserver.WAL.AppendTime_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendTime_min": {
- "metric": "regionserver.WAL.AppendTime_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/AppendTime_num_ops": {
- "metric": "regionserver.WAL.AppendTime_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/SyncTime_75th_percentile": {
- "metric": "regionserver.WAL.SyncTime_75th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/SyncTime_95th_percentile": {
- "metric": "regionserver.WAL.SyncTime_95th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/SyncTime_99th_percentile": {
- "metric": "regionserver.WAL.SyncTime_99th_percentile",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/SyncTime_max": {
- "metric": "regionserver.WAL.SyncTime_max",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/SyncTime_mean": {
- "metric": "regionserver.WAL.SyncTime_mean",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/SyncTime_median": {
- "metric": "regionserver.WAL.SyncTime_median",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/SyncTime_min": {
- "metric": "regionserver.WAL.SyncTime_min",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/SyncTime_num_ops": {
- "metric": "regionserver.WAL.SyncTime_num_ops",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/appendCount": {
- "metric": "regionserver.WAL.appendCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/regionserver/wal/slowAppendCount": {
- "metric": "regionserver.WAL.slowAppendCount",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ugi/GetGroupsAvgTime": {
- "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ugi/GetGroupsNumOps": {
- "metric": "ugi.UgiMetrics.GetGroupsNumOps",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ugi/LoginFailureAvgTime": {
- "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ugi/LoginFailureNumOps": {
- "metric": "ugi.UgiMetrics.LoginFailureNumOps",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ugi/LoginSuccessAvgTime": {
- "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/hbase/ugi/LoginSuccessNumOps": {
- "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
- "pointInTime": true,
- "temporal": true
- }
- }
- }
- }
- ]
- }
-} \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/alerts/alert_ambari_metrics_monitor.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/alerts/alert_ambari_metrics_monitor.py
deleted file mode 100755
index fa44a7fb63..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/alerts/alert_ambari_metrics_monitor.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import os
-import socket
-
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.core.exceptions import ComponentIsNotRunning
-from ambari_commons import OSCheck, OSConst
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-
-if OSCheck.is_windows_family():
- from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
-RESULT_CODE_OK = 'OK'
-RESULT_CODE_CRITICAL = 'CRITICAL'
-RESULT_CODE_UNKNOWN = 'UNKNOWN'
-
-AMS_MONITOR_PID_DIR = '{{ams-env/metrics_monitor_pid_dir}}'
-
-def get_tokens():
- """
- Returns a tuple of tokens in the format {{site/property}} that will be used
- to build the dictionary passed into execute
- """
- return (AMS_MONITOR_PID_DIR,)
-
-@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
-def is_monitor_process_live(pid_file=None):
- """
- Gets whether the Metrics Monitor Service is running.
- :param pid_file: ignored
- :return: True if the monitor is running, False otherwise
- """
- try:
- check_windows_service_status("AmbariMetricsHostMonitoring")
- ams_monitor_process_running = True
- except:
- ams_monitor_process_running = False
- return ams_monitor_process_running
-
-@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
-def is_monitor_process_live(pid_file):
- """
- Gets whether the Metrics Monitor represented by the specified file is running.
- :param pid_file: the PID file of the monitor to check
- :return: True if the monitor is running, False otherwise
- """
- live = False
-
- try:
- check_process_status(pid_file)
- live = True
- except ComponentIsNotRunning:
- pass
-
- return live
-
-
-def execute(configurations={}, parameters={}, host_name=None):
- """
- Returns a tuple containing the result code and a pre-formatted result label
-
- Keyword arguments:
- configurations (dictionary): a mapping of configuration key to value
- parameters (dictionary): a mapping of script parameter key to value
- host_name (string): the name of this host where the alert is running
- """
-
- if configurations is None:
- return (RESULT_CODE_UNKNOWN, ['There were no configurations supplied to the script.'])
-
- if set([AMS_MONITOR_PID_DIR]).issubset(configurations):
- AMS_MONITOR_PID_PATH = os.path.join(configurations[AMS_MONITOR_PID_DIR], 'ambari-metrics-monitor.pid')
- else:
- return (RESULT_CODE_UNKNOWN, ['The ams_monitor_pid_dir is a required parameter.'])
-
- if host_name is None:
- host_name = socket.getfqdn()
-
- ams_monitor_process_running = is_monitor_process_live(AMS_MONITOR_PID_PATH)
-
- alert_state = RESULT_CODE_OK if ams_monitor_process_running else RESULT_CODE_CRITICAL
-
- alert_label = 'Ambari Monitor is running on {0}' if ams_monitor_process_running else 'Ambari Monitor is NOT running on {0}'
- alert_label = alert_label.format(host_name)
-
- return (alert_state, [alert_label])
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/hbaseSmokeVerify.sh
deleted file mode 100755
index 5c320c0c78..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/hbaseSmokeVerify.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-conf_dir=$1
-data=$2
-hbase_cmd=$3
-echo "scan 'ambarismoketest'" | $hbase_cmd --config $conf_dir shell > /tmp/hbase_chk_verify
-cat /tmp/hbase_chk_verify
-echo "Looking for $data"
-grep -q $data /tmp/hbase_chk_verify
-if [ "$?" -ne 0 ]
-then
- exit 1
-fi
-
-grep -q '1 row(s)' /tmp/hbase_chk_verify
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/AMBARI_METRICS.txt b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/AMBARI_METRICS.txt
deleted file mode 100755
index 6693503b28..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/AMBARI_METRICS.txt
+++ /dev/null
@@ -1,245 +0,0 @@
-regionserver.WAL.SyncTime_min
-regionserver.WAL.SyncTime_num_ops
-regionserver.WAL.appendCount
-regionserver.WAL.slowAppendCount
-jvm.JvmMetrics.GcTimeMillis
-jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep
-jvm.JvmMetrics.GcTimeMillisParNew
-ugi.UgiMetrics.GetGroupsAvgTime
-ugi.UgiMetrics.GetGroupsNumOps
-ugi.UgiMetrics.LoginFailureNumOps
-ugi.UgiMetrics.LoginSuccessAvgTime
-ugi.UgiMetrics.LoginSuccessNumOps
-ugi.UgiMetrics.LoginFailureAvgTime
-jvm.JvmMetrics.LogError
-jvm.JvmMetrics.LogFatal
-jvm.JvmMetrics.LogInfo
-jvm.JvmMetrics.LogWarn
-jvm.JvmMetrics.MemHeapCommittedM
-jvm.JvmMetrics.MemHeapMaxM
-jvm.JvmMetrics.MemHeapUsedM
-jvm.JvmMetrics.MemMaxM
-jvm.JvmMetrics.MemNonHeapCommittedM
-jvm.JvmMetrics.MemNonHeapMaxM
-jvm.JvmMetrics.MemNonHeapUsedM
-jvm.JvmMetrics.ThreadsBlocked
-jvm.JvmMetrics.ThreadsNew
-jvm.JvmMetrics.ThreadsRunnable
-jvm.JvmMetrics.ThreadsTerminated
-jvm.JvmMetrics.ThreadsTimedWaiting
-master.AssignmentManger.Assign_75th_percentile
-master.AssignmentManger.Assign_95th_percentile
-master.AssignmentManger.Assign_99th_percentile
-master.AssignmentManger.Assign_max
-master.AssignmentManger.Assign_mean
-master.AssignmentManger.Assign_median
-master.AssignmentManger.Assign_min
-jvm.JvmMetrics.ThreadsWaiting
-master.AssignmentManger.Assign_num_ops
-master.AssignmentManger.BulkAssign_75th_percentile
-master.AssignmentManger.BulkAssign_95th_percentile
-master.AssignmentManger.BulkAssign_99th_percentile
-master.AssignmentManger.BulkAssign_max
-master.AssignmentManger.BulkAssign_mean
-master.AssignmentManger.BulkAssign_median
-master.AssignmentManger.BulkAssign_min
-master.AssignmentManger.BulkAssign_num_ops
-master.AssignmentManger.ritCount
-master.AssignmentManger.ritCountOverThreshold
-master.AssignmentManger.ritOldestAge
-master.Balancer.BalancerCluster_75th_percentile
-master.Balancer.BalancerCluster_95th_percentile
-master.Balancer.BalancerCluster_99th_percentile
-master.Balancer.BalancerCluster_max
-master.Balancer.BalancerCluster_mean
-master.Balancer.BalancerCluster_median
-master.Balancer.BalancerCluster_min
-master.Balancer.BalancerCluster_num_ops
-master.Balancer.miscInvocationCount
-master.FileSystem.HlogSplitSize_75th_percentile
-master.FileSystem.HlogSplitSize_95th_percentile
-master.FileSystem.HlogSplitSize_99th_percentile
-master.FileSystem.HlogSplitSize_max
-master.FileSystem.HlogSplitSize_mean
-master.FileSystem.HlogSplitSize_median
-master.FileSystem.HlogSplitSize_min
-master.FileSystem.HlogSplitSize_num_ops
-master.FileSystem.HlogSplitTime_75th_percentile
-master.FileSystem.HlogSplitTime_95th_percentile
-master.FileSystem.HlogSplitTime_99th_percentile
-master.FileSystem.HlogSplitTime_max
-master.FileSystem.HlogSplitTime_mean
-master.FileSystem.HlogSplitTime_median
-master.FileSystem.HlogSplitTime_min
-master.FileSystem.HlogSplitTime_num_ops
-master.FileSystem.MetaHlogSplitSize_75th_percentile
-master.FileSystem.MetaHlogSplitSize_95th_percentile
-master.FileSystem.MetaHlogSplitSize_99th_percentile
-master.FileSystem.MetaHlogSplitSize_max
-master.FileSystem.MetaHlogSplitSize_mean
-master.FileSystem.MetaHlogSplitSize_median
-master.FileSystem.MetaHlogSplitSize_min
-master.FileSystem.MetaHlogSplitSize_num_ops
-master.FileSystem.MetaHlogSplitTime_75th_percentile
-master.FileSystem.MetaHlogSplitTime_95th_percentile
-master.FileSystem.MetaHlogSplitTime_99th_percentile
-master.FileSystem.MetaHlogSplitTime_max
-master.FileSystem.MetaHlogSplitTime_mean
-master.FileSystem.MetaHlogSplitTime_median
-master.FileSystem.MetaHlogSplitTime_min
-master.FileSystem.MetaHlogSplitTime_num_ops
-master.Server.averageLoad
-master.Server.clusterRequests
-master.Server.masterActiveTime
-master.Server.masterStartTime
-master.Server.numDeadRegionServers
-master.Server.numRegionServers
-metricssystem.MetricsSystem.DroppedPubAll
-metricssystem.MetricsSystem.NumActiveSinks
-ipc.IPC.ProcessCallTime_75th_percentile
-ipc.IPC.ProcessCallTime_95th_percentile
-metricssystem.MetricsSystem.NumActiveSources
-metricssystem.MetricsSystem.NumAllSinks
-ipc.IPC.ProcessCallTime_99th_percentile
-metricssystem.MetricsSystem.NumAllSources
-metricssystem.MetricsSystem.PublishAvgTime
-metricssystem.MetricsSystem.PublishNumOps
-ipc.IPC.ProcessCallTime_max
-ipc.IPC.ProcessCallTime_mean
-metricssystem.MetricsSystem.Sink_timelineAvgTime
-ipc.IPC.ProcessCallTime_median
-metricssystem.MetricsSystem.Sink_timelineDropped
-metricssystem.MetricsSystem.Sink_timelineNumOps
-ipc.IPC.ProcessCallTime_num_ops
-metricssystem.MetricsSystem.Sink_timelineQsize
-metricssystem.MetricsSystem.SnapshotAvgTime
-ipc.IPC.QueueCallTime_95th_percentile
-metricssystem.MetricsSystem.SnapshotNumOps
-ipc.IPC.ProcessCallTime_min
-ipc.IPC.QueueCallTime_75th_percentile
-ipc.IPC.QueueCallTime_99th_percentile
-ipc.IPC.QueueCallTime_max
-ipc.IPC.QueueCallTime_mean
-ipc.IPC.QueueCallTime_median
-ipc.IPC.QueueCallTime_min
-regionserver.Server.Append_75th_percentile
-regionserver.Server.Append_95th_percentile
-ipc.IPC.QueueCallTime_num_ops
-ipc.IPC.authenticationFailures
-regionserver.Server.Append_99th_percentile
-regionserver.Server.Append_max
-ipc.IPC.authenticationSuccesses
-regionserver.Server.Append_mean
-regionserver.Server.Append_median
-regionserver.Server.Append_min
-regionserver.Server.Append_num_ops
-regionserver.Server.Delete_75th_percentile
-regionserver.Server.Delete_95th_percentile
-ipc.IPC.authorizationFailures
-regionserver.Server.Delete_99th_percentile
-regionserver.Server.Delete_max
-regionserver.Server.Delete_mean
-regionserver.Server.Delete_median
-regionserver.Server.Delete_min
-regionserver.Server.Delete_num_ops
-ipc.IPC.authorizationSuccesses
-ipc.IPC.numActiveHandler
-ipc.IPC.numCallsInGeneralQueue
-regionserver.Server.Get_75th_percentile
-regionserver.Server.Get_95th_percentile
-regionserver.Server.Get_99th_percentile
-regionserver.Server.Get_max
-regionserver.Server.Get_mean
-regionserver.Server.Get_median
-ipc.IPC.numCallsInPriorityQueue
-regionserver.Server.Get_min
-regionserver.Server.Get_num_ops
-regionserver.Server.Increment_75th_percentile
-regionserver.Server.Increment_95th_percentile
-regionserver.Server.Increment_99th_percentile
-regionserver.Server.Increment_max
-regionserver.Server.Increment_mean
-regionserver.Server.Increment_median
-ipc.IPC.numCallsInReplicationQueue
-ipc.IPC.numOpenConnections
-regionserver.Server.Increment_min
-regionserver.Server.Increment_num_ops
-ipc.IPC.queueSize
-regionserver.Server.Mutate_75th_percentile
-regionserver.Server.Mutate_95th_percentile
-regionserver.Server.Mutate_99th_percentile
-regionserver.Server.Mutate_max
-regionserver.Server.Mutate_mean
-regionserver.Server.Mutate_median
-ipc.IPC.receivedBytes
-regionserver.Server.Mutate_min
-regionserver.Server.Mutate_num_ops
-regionserver.Server.Replay_75th_percentile
-regionserver.Server.Replay_95th_percentile
-regionserver.Server.Replay_99th_percentile
-regionserver.Server.Replay_max
-regionserver.Server.Replay_mean
-regionserver.Server.Replay_median
-ipc.IPC.sentBytes
-jvm.JvmMetrics.GcCount
-regionserver.Server.Replay_min
-regionserver.Server.Replay_num_ops
-regionserver.Server.blockCacheCount
-regionserver.Server.blockCacheEvictionCount
-regionserver.Server.blockCacheExpressHitPercent
-regionserver.Server.blockCacheFreeSize
-regionserver.Server.blockCacheHitCount
-regionserver.Server.blockCacheMissCount
-regionserver.Server.blockCacheSize
-regionserver.Server.blockCountHitPercent
-regionserver.Server.checkMutateFailedCount
-regionserver.Server.checkMutatePassedCount
-regionserver.Server.compactionQueueLength
-regionserver.Server.flushQueueLength
-jvm.JvmMetrics.GcCountConcurrentMarkSweep
-regionserver.Server.hlogFileCount
-regionserver.Server.hlogFileSize
-regionserver.Server.memStoreSize
-regionserver.Server.mutationsWithoutWALCount
-regionserver.Server.mutationsWithoutWALSize
-regionserver.Server.percentFilesLocal
-regionserver.Server.readRequestCount
-regionserver.Server.regionCount
-regionserver.Server.regionServerStartTime
-regionserver.Server.slowAppendCount
-regionserver.Server.slowDeleteCount
-regionserver.Server.slowGetCount
-regionserver.Server.slowIncrementCount
-regionserver.Server.slowPutCount
-regionserver.Server.staticBloomSize
-regionserver.Server.staticIndexSize
-regionserver.Server.storeCount
-regionserver.Server.storeFileCount
-regionserver.Server.storeFileIndexSize
-regionserver.Server.storeFileSize
-regionserver.Server.totalRequestCount
-regionserver.Server.updatesBlockedTime
-regionserver.Server.writeRequestCount
-regionserver.WAL.AppendSize_75th_percentile
-regionserver.WAL.AppendSize_95th_percentile
-regionserver.WAL.AppendSize_99th_percentile
-regionserver.WAL.AppendSize_max
-regionserver.WAL.AppendSize_mean
-regionserver.WAL.AppendSize_median
-regionserver.WAL.SyncTime_median
-jvm.JvmMetrics.GcCountParNew
-regionserver.WAL.AppendSize_min
-regionserver.WAL.AppendSize_num_ops
-regionserver.WAL.SyncTime_max
-regionserver.WAL.AppendTime_75th_percentile
-regionserver.WAL.AppendTime_95th_percentile
-regionserver.WAL.AppendTime_99th_percentile
-regionserver.WAL.AppendTime_max
-regionserver.WAL.SyncTime_95th_percentile
-regionserver.WAL.AppendTime_mean
-regionserver.WAL.AppendTime_median
-regionserver.WAL.AppendTime_min
-regionserver.WAL.AppendTime_num_ops
-regionserver.WAL.SyncTime_75th_percentile
-regionserver.WAL.SyncTime_99th_percentile
-regionserver.WAL.SyncTime_mean
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/FLUME.txt b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/FLUME.txt
deleted file mode 100755
index b3bfec319e..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/FLUME.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-BatchCompleteCount
-BatchUnderflowCount
-EventTakeSuccessCount
-ConnectionClosedCount
-ConnectionCreatedCount
-ChannelCapacity
-ConnectionFailedCount
-EventDrainAttemptCount
-ChannelFillPercentage
-EventDrainSuccessCount
-BatchEmptyCount
-EventPutAttemptCount
-ChannelSize
-EventPutSuccessCount
-StartTime
-StopTime
-EventTakeAttemptCount
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/HBASE.txt b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/HBASE.txt
deleted file mode 100755
index 0067403986..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/HBASE.txt
+++ /dev/null
@@ -1,588 +0,0 @@
-regionserver.WAL.SyncTime_min
-regionserver.WAL.SyncTime_num_ops
-regionserver.WAL.appendCount
-regionserver.WAL.slowAppendCount
-jvm.JvmMetrics.GcTimeMillis
-jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep
-jvm.JvmMetrics.GcTimeMillisParNew
-ugi.UgiMetrics.GetGroupsAvgTime
-ugi.UgiMetrics.GetGroupsNumOps
-ugi.UgiMetrics.LoginFailureNumOps
-ugi.UgiMetrics.LoginSuccessAvgTime
-ugi.UgiMetrics.LoginSuccessNumOps
-ugi.UgiMetrics.LoginFailureAvgTime
-jvm.JvmMetrics.LogError
-jvm.JvmMetrics.LogFatal
-jvm.JvmMetrics.LogInfo
-jvm.JvmMetrics.LogWarn
-jvm.JvmMetrics.MemHeapCommittedM
-jvm.JvmMetrics.MemHeapMaxM
-jvm.JvmMetrics.MemHeapUsedM
-jvm.JvmMetrics.MemMaxM
-jvm.JvmMetrics.MemNonHeapCommittedM
-jvm.JvmMetrics.MemNonHeapMaxM
-jvm.JvmMetrics.MemNonHeapUsedM
-jvm.JvmMetrics.ThreadsBlocked
-jvm.JvmMetrics.ThreadsNew
-jvm.JvmMetrics.ThreadsRunnable
-jvm.JvmMetrics.ThreadsTerminated
-jvm.JvmMetrics.ThreadsTimedWaiting
-master.AssignmentManger.Assign_75th_percentile
-master.AssignmentManger.Assign_95th_percentile
-master.AssignmentManger.Assign_99th_percentile
-master.AssignmentManger.Assign_max
-master.AssignmentManger.Assign_mean
-master.AssignmentManger.Assign_median
-master.AssignmentManger.Assign_min
-jvm.JvmMetrics.ThreadsWaiting
-master.AssignmentManger.Assign_num_ops
-master.AssignmentManger.BulkAssign_75th_percentile
-master.AssignmentManger.BulkAssign_95th_percentile
-master.AssignmentManger.BulkAssign_99th_percentile
-master.AssignmentManger.BulkAssign_max
-master.AssignmentManger.BulkAssign_mean
-master.AssignmentManger.BulkAssign_median
-master.AssignmentManger.BulkAssign_min
-master.AssignmentManger.BulkAssign_num_ops
-master.AssignmentManger.ritCount
-master.AssignmentManger.ritCountOverThreshold
-master.AssignmentManger.ritOldestAge
-master.Balancer.BalancerCluster_75th_percentile
-master.Balancer.BalancerCluster_95th_percentile
-master.Balancer.BalancerCluster_99th_percentile
-master.Balancer.BalancerCluster_max
-master.Balancer.BalancerCluster_mean
-master.Balancer.BalancerCluster_median
-master.Balancer.BalancerCluster_min
-master.Balancer.BalancerCluster_num_ops
-master.Balancer.miscInvocationCount
-master.FileSystem.HlogSplitSize_75th_percentile
-master.FileSystem.HlogSplitSize_95th_percentile
-master.FileSystem.HlogSplitSize_99th_percentile
-master.FileSystem.HlogSplitSize_max
-master.FileSystem.HlogSplitSize_mean
-master.FileSystem.HlogSplitSize_median
-master.FileSystem.HlogSplitSize_min
-master.FileSystem.HlogSplitSize_num_ops
-master.FileSystem.HlogSplitTime_75th_percentile
-master.FileSystem.HlogSplitTime_95th_percentile
-master.FileSystem.HlogSplitTime_99th_percentile
-master.FileSystem.HlogSplitTime_max
-master.FileSystem.HlogSplitTime_mean
-master.FileSystem.HlogSplitTime_median
-master.FileSystem.HlogSplitTime_min
-master.FileSystem.HlogSplitTime_num_ops
-master.FileSystem.MetaHlogSplitSize_75th_percentile
-master.FileSystem.MetaHlogSplitSize_95th_percentile
-master.FileSystem.MetaHlogSplitSize_99th_percentile
-master.FileSystem.MetaHlogSplitSize_max
-master.FileSystem.MetaHlogSplitSize_mean
-master.FileSystem.MetaHlogSplitSize_median
-master.FileSystem.MetaHlogSplitSize_min
-master.FileSystem.MetaHlogSplitSize_num_ops
-master.FileSystem.MetaHlogSplitTime_75th_percentile
-master.FileSystem.MetaHlogSplitTime_95th_percentile
-master.FileSystem.MetaHlogSplitTime_99th_percentile
-master.FileSystem.MetaHlogSplitTime_max
-master.FileSystem.MetaHlogSplitTime_mean
-master.FileSystem.MetaHlogSplitTime_median
-master.FileSystem.MetaHlogSplitTime_min
-master.FileSystem.MetaHlogSplitTime_num_ops
-master.Server.averageLoad
-master.Server.clusterRequests
-master.Server.masterActiveTime
-master.Server.masterStartTime
-master.Server.numDeadRegionServers
-master.Server.numRegionServers
-metricssystem.MetricsSystem.DroppedPubAll
-metricssystem.MetricsSystem.NumActiveSinks
-ipc.IPC.ProcessCallTime_75th_percentile
-ipc.IPC.ProcessCallTime_95th_percentile
-metricssystem.MetricsSystem.NumActiveSources
-metricssystem.MetricsSystem.NumAllSinks
-ipc.IPC.ProcessCallTime_99th_percentile
-metricssystem.MetricsSystem.NumAllSources
-metricssystem.MetricsSystem.PublishAvgTime
-metricssystem.MetricsSystem.PublishNumOps
-ipc.IPC.ProcessCallTime_max
-ipc.IPC.ProcessCallTime_mean
-metricssystem.MetricsSystem.Sink_timelineAvgTime
-ipc.IPC.ProcessCallTime_median
-metricssystem.MetricsSystem.Sink_timelineDropped
-metricssystem.MetricsSystem.Sink_timelineNumOps
-ipc.IPC.ProcessCallTime_num_ops
-metricssystem.MetricsSystem.Sink_timelineQsize
-metricssystem.MetricsSystem.SnapshotAvgTime
-ipc.IPC.QueueCallTime_95th_percentile
-metricssystem.MetricsSystem.SnapshotNumOps
-ipc.IPC.ProcessCallTime_min
-ipc.IPC.QueueCallTime_75th_percentile
-ipc.IPC.QueueCallTime_99th_percentile
-ipc.IPC.QueueCallTime_max
-ipc.IPC.QueueCallTime_mean
-ipc.IPC.QueueCallTime_median
-ipc.IPC.QueueCallTime_min
-regionserver.Server.Append_75th_percentile
-regionserver.Server.Append_95th_percentile
-ipc.IPC.QueueCallTime_num_ops
-ipc.IPC.authenticationFailures
-regionserver.Server.Append_99th_percentile
-regionserver.Server.Append_max
-ipc.IPC.authenticationSuccesses
-regionserver.Server.Append_mean
-regionserver.Server.Append_median
-regionserver.Server.Append_min
-regionserver.Server.Append_num_ops
-regionserver.Server.Delete_75th_percentile
-regionserver.Server.Delete_95th_percentile
-ipc.IPC.authorizationFailures
-regionserver.Server.Delete_99th_percentile
-regionserver.Server.Delete_max
-regionserver.Server.Delete_mean
-regionserver.Server.Delete_median
-regionserver.Server.Delete_min
-regionserver.Server.Delete_num_ops
-ipc.IPC.authorizationSuccesses
-ipc.IPC.numActiveHandler
-ipc.IPC.numCallsInGeneralQueue
-regionserver.Server.Get_75th_percentile
-regionserver.Server.Get_95th_percentile
-regionserver.Server.Get_99th_percentile
-regionserver.Server.Get_max
-regionserver.Server.Get_mean
-regionserver.Server.Get_median
-ipc.IPC.numCallsInPriorityQueue
-regionserver.Server.Get_min
-regionserver.Server.Get_num_ops
-regionserver.Server.Increment_75th_percentile
-regionserver.Server.Increment_95th_percentile
-regionserver.Server.Increment_99th_percentile
-regionserver.Server.Increment_max
-regionserver.Server.Increment_mean
-regionserver.Server.Increment_median
-ipc.IPC.numCallsInReplicationQueue
-ipc.IPC.numOpenConnections
-regionserver.Server.Increment_min
-regionserver.Server.Increment_num_ops
-ipc.IPC.queueSize
-regionserver.Server.Mutate_75th_percentile
-regionserver.Server.Mutate_95th_percentile
-regionserver.Server.Mutate_99th_percentile
-regionserver.Server.Mutate_max
-regionserver.Server.Mutate_mean
-regionserver.Server.Mutate_median
-ipc.IPC.receivedBytes
-regionserver.Server.Mutate_min
-regionserver.Server.Mutate_num_ops
-regionserver.Server.Replay_75th_percentile
-regionserver.Server.Replay_95th_percentile
-regionserver.Server.Replay_99th_percentile
-regionserver.Server.Replay_max
-regionserver.Server.Replay_mean
-regionserver.Server.Replay_median
-ipc.IPC.sentBytes
-jvm.JvmMetrics.GcCount
-regionserver.Server.Replay_min
-regionserver.Server.Replay_num_ops
-regionserver.Server.blockCacheCount
-regionserver.Server.blockCacheEvictionCount
-regionserver.Server.blockCacheExpressHitPercent
-regionserver.Server.blockCacheFreeSize
-regionserver.Server.blockCacheHitCount
-regionserver.Server.blockCacheMissCount
-regionserver.Server.blockCacheSize
-regionserver.Server.blockCountHitPercent
-regionserver.Server.checkMutateFailedCount
-regionserver.Server.checkMutatePassedCount
-regionserver.Server.compactionQueueLength
-regionserver.Server.flushQueueLength
-jvm.JvmMetrics.GcCountConcurrentMarkSweep
-regionserver.Server.hlogFileCount
-regionserver.Server.hlogFileSize
-regionserver.Server.memStoreSize
-regionserver.Server.mutationsWithoutWALCount
-regionserver.Server.mutationsWithoutWALSize
-regionserver.Server.percentFilesLocal
-regionserver.Server.readRequestCount
-regionserver.Server.regionCount
-regionserver.Server.regionServerStartTime
-regionserver.Server.slowAppendCount
-regionserver.Server.slowDeleteCount
-regionserver.Server.slowGetCount
-regionserver.Server.slowIncrementCount
-regionserver.Server.slowPutCount
-regionserver.Server.staticBloomSize
-regionserver.Server.staticIndexSize
-regionserver.Server.storeCount
-regionserver.Server.storeFileCount
-regionserver.Server.storeFileIndexSize
-regionserver.Server.storeFileSize
-regionserver.Server.totalRequestCount
-regionserver.Server.updatesBlockedTime
-regionserver.Server.writeRequestCount
-regionserver.WAL.AppendSize_75th_percentile
-regionserver.WAL.AppendSize_95th_percentile
-regionserver.WAL.AppendSize_99th_percentile
-regionserver.WAL.AppendSize_max
-regionserver.WAL.AppendSize_mean
-regionserver.WAL.AppendSize_median
-regionserver.WAL.SyncTime_median
-jvm.JvmMetrics.GcCountParNew
-regionserver.WAL.AppendSize_min
-regionserver.WAL.AppendSize_num_ops
-regionserver.WAL.SyncTime_max
-regionserver.WAL.AppendTime_75th_percentile
-regionserver.WAL.AppendTime_95th_percentile
-regionserver.WAL.AppendTime_99th_percentile
-regionserver.WAL.AppendTime_max
-regionserver.WAL.SyncTime_95th_percentile
-regionserver.WAL.AppendTime_mean
-regionserver.WAL.AppendTime_median
-regionserver.WAL.AppendTime_min
-regionserver.WAL.AppendTime_num_ops
-regionserver.WAL.SyncTime_75th_percentile
-regionserver.WAL.SyncTime_99th_percentile
-regionserver.WAL.SyncTime_mean
-regionserver.WAL.SyncTime_median
-regionserver.WAL.SyncTime_min
-regionserver.WAL.SyncTime_num_ops
-regionserver.WAL.appendCount
-regionserver.Server.majorCompactedCellsSize
-regionserver.WAL.rollRequest
-regionserver.WAL.AppendTime_99th_percentile
-regionserver.WAL.slowAppendCount
-regionserver.WAL.AppendTime_num_ops
-regionserver.WAL.SyncTime_95th_percentile
-regionserver.Server.Mutate_median
-regionserver.WAL.AppendTime_75th_percentile
-regionserver.WAL.AppendSize_num_ops
-regionserver.Server.Mutate_max
-regionserver.WAL.AppendSize_min
-regionserver.WAL.AppendTime_min
-regionserver.WAL.SyncTime_99th_percentile
-regionserver.Server.Mutate_95th_percentile
-regionserver.WAL.AppendSize_mean
-regionserver.WAL.SyncTime_mean
-regionserver.WAL.AppendSize_99th_percentile
-jvm.JvmMetrics.GcTimeMillis
-regionserver.WAL.AppendSize_75th_percentile
-jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep
-regionserver.WAL.SyncTime_max
-regionserver.Server.Increment_median
-regionserver.Server.updatesBlockedTime
-regionserver.Server.Increment_max
-ugi.UgiMetrics.GetGroupsAvgTime
-regionserver.WAL.lowReplicaRollRequest
-ugi.UgiMetrics.GetGroupsNumOps
-regionserver.Server.storeFileSize
-regionserver.Server.Increment_95th_percentile
-jvm.JvmMetrics.GcTimeMillisParNew
-ugi.UgiMetrics.LoginFailureAvgTime
-ugi.UgiMetrics.LoginFailureNumOps
-regionserver.Server.storeFileCount
-ugi.UgiMetrics.LoginSuccessNumOps
-regionserver.Server.staticIndexSize
-jvm.JvmMetrics.LogError
-regionserver.Server.splitQueueLength
-regionserver.Server.Get_median
-regionserver.Server.slowPutCount
-regionserver.Server.Get_max
-jvm.JvmMetrics.LogFatal
-regionserver.Server.slowGetCount
-jvm.JvmMetrics.LogInfo
-regionserver.Server.slowAppendCount
-regionserver.Server.Get_95th_percentile
-jvm.JvmMetrics.LogWarn
-regionserver.Server.regionCount
-regionserver.Server.FlushTime_num_ops
-regionserver.Server.FlushTime_min
-regionserver.Server.readRequestCount
-jvm.JvmMetrics.MemHeapCommittedM
-regionserver.Server.percentFilesLocalSecondaryRegions
-regionserver.Server.percentFilesLocal
-regionserver.Server.FlushTime_max
-regionserver.Server.FlushTime_99th_percentile
-regionserver.Server.FlushTime_95th_percentile
-regionserver.Server.Delete_num_ops
-jvm.JvmMetrics.MemHeapMaxM
-regionserver.Server.mutationsWithoutWALCount
-jvm.JvmMetrics.MemHeapUsedM
-regionserver.Server.Delete_median
-regionserver.Server.ScanNext_max
-regionserver.Server.ScanNext_99th_percentile
-regionserver.Server.majorCompactedCellsCount
-regionserver.Server.hlogFileSize
-regionserver.Server.flushedCellsCount
-jvm.JvmMetrics.MemMaxM
-regionserver.Server.hlogFileCount
-regionserver.Server.Delete_95th_percentile
-jvm.JvmMetrics.MemNonHeapCommittedM
-jvm.JvmMetrics.MemNonHeapMaxM
-jvm.JvmMetrics.MemNonHeapUsedM
-regionserver.Server.Append_num_ops
-regionserver.Server.flushQueueLength
-jvm.JvmMetrics.ThreadsBlocked
-regionserver.Server.Append_median
-jvm.JvmMetrics.ThreadsNew
-regionserver.Server.checkMutatePassedCount
-regionserver.Server.compactedCellsSize
-jvm.JvmMetrics.ThreadsRunnable
-jvm.JvmMetrics.ThreadsTerminated
-jvm.JvmMetrics.ThreadsTimedWaiting
-master.AssignmentManger.Assign_75th_percentile
-master.AssignmentManger.Assign_95th_percentile
-master.AssignmentManger.Assign_99th_percentile
-master.AssignmentManger.Assign_max
-regionserver.Server.Append_95th_percentile
-master.AssignmentManger.Assign_mean
-master.AssignmentManger.Assign_median
-regionserver.Replication.sink.appliedOps
-regionserver.Replication.sink.appliedBatches
-regionserver.Replication.sink.ageOfLastAppliedOp
-regionserver.WAL.SyncTime_75th_percentile
-regionserver.RegionServer.receivedBytes
-regionserver.RegionServer.queueSize
-regionserver.RegionServer.numOpenConnections
-regionserver.RegionServer.numCallsInPriorityQueue
-regionserver.Server.Replay_num_ops
-master.AssignmentManger.Assign_min
-master.AssignmentManger.Assign_num_ops
-regionserver.Server.checkMutateFailedCount
-regionserver.RegionServer.exceptions.RegionTooBusyException
-regionserver.RegionServer.exceptions.RegionMovedException
-regionserver.RegionServer.exceptions.OutOfOrderScannerNextException
-master.AssignmentManger.BulkAssign_75th_percentile
-master.AssignmentManger.BulkAssign_95th_percentile
-regionserver.RegionServer.exceptions.FailedSanityCheckException
-regionserver.RegionServer.exceptions
-regionserver.RegionServer.authorizationSuccesses
-regionserver.RegionServer.authenticationSuccesses
-regionserver.RegionServer.authenticationFailures
-regionserver.RegionServer.TotalCallTime_num_ops
-master.AssignmentManger.BulkAssign_99th_percentile
-jvm.JvmMetrics.ThreadsWaiting
-regionserver.RegionServer.TotalCallTime_median
-regionserver.RegionServer.TotalCallTime_mean
-master.AssignmentManger.BulkAssign_max
-regionserver.RegionServer.TotalCallTime_95th_percentile
-regionserver.RegionServer.TotalCallTime_75th_percentile
-regionserver.RegionServer.QueueCallTime_num_ops
-master.AssignmentManger.BulkAssign_mean
-master.AssignmentManger.BulkAssign_median
-regionserver.RegionServer.QueueCallTime_median
-regionserver.RegionServer.QueueCallTime_mean
-regionserver.RegionServer.QueueCallTime_max
-regionserver.RegionServer.QueueCallTime_95th_percentile
-regionserver.RegionServer.QueueCallTime_75th_percentile
-regionserver.RegionServer.ProcessCallTime_num_ops
-regionserver.RegionServer.ProcessCallTime_median
-regionserver.RegionServer.ProcessCallTime_mean
-regionserver.Server.ScanNext_num_ops
-master.AssignmentManger.BulkAssign_num_ops
-master.AssignmentManger.BulkAssign_min
-regionserver.RegionServer.ProcessCallTime_95th_percentile
-master.AssignmentManger.ritCount
-master.AssignmentManger.ritCountOverThreshold
-master.AssignmentManger.ritOldestAge
-master.Balancer.BalancerCluster_75th_percentile
-master.Balancer.BalancerCluster_95th_percentile
-master.Balancer.BalancerCluster_99th_percentile
-ugi.UgiMetrics.LoginSuccessAvgTime
-master.Balancer.BalancerCluster_max
-master.Balancer.BalancerCluster_mean
-master.Balancer.BalancerCluster_median
-master.Balancer.BalancerCluster_min
-regionserver.Server.ScanNext_median
-master.Balancer.BalancerCluster_num_ops
-master.Balancer.miscInvocationCount
-master.FileSystem.HlogSplitSize_75th_percentile
-master.FileSystem.HlogSplitSize_95th_percentile
-master.FileSystem.HlogSplitSize_max
-master.FileSystem.HlogSplitSize_99th_percentile
-master.FileSystem.HlogSplitSize_mean
-master.FileSystem.HlogSplitSize_median
-master.FileSystem.HlogSplitSize_min
-master.FileSystem.HlogSplitSize_num_ops
-master.FileSystem.HlogSplitTime_75th_percentile
-master.FileSystem.HlogSplitTime_95th_percentile
-regionserver.Server.SplitTime_median
-master.FileSystem.HlogSplitTime_max
-master.FileSystem.HlogSplitTime_99th_percentile
-master.FileSystem.HlogSplitTime_mean
-master.FileSystem.HlogSplitTime_median
-master.FileSystem.HlogSplitTime_min
-master.FileSystem.HlogSplitTime_num_ops
-master.FileSystem.MetaHlogSplitSize_75th_percentile
-master.FileSystem.MetaHlogSplitSize_95th_percentile
-master.FileSystem.MetaHlogSplitSize_max
-master.FileSystem.MetaHlogSplitSize_99th_percentile
-master.FileSystem.MetaHlogSplitSize_mean
-master.FileSystem.MetaHlogSplitSize_median
-master.FileSystem.MetaHlogSplitSize_min
-master.FileSystem.MetaHlogSplitSize_num_ops
-master.FileSystem.MetaHlogSplitTime_75th_percentile
-master.FileSystem.MetaHlogSplitTime_95th_percentile
-master.FileSystem.MetaHlogSplitTime_max
-master.FileSystem.MetaHlogSplitTime_99th_percentile
-master.FileSystem.MetaHlogSplitTime_mean
-master.FileSystem.MetaHlogSplitTime_median
-master.FileSystem.MetaHlogSplitTime_min
-master.FileSystem.MetaHlogSplitTime_num_ops
-master.Master.ProcessCallTime_75th_percentile
-master.Master.ProcessCallTime_95th_percentile
-master.Master.ProcessCallTime_99th_percentile
-master.Master.ProcessCallTime_max
-master.Master.ProcessCallTime_mean
-master.Master.ProcessCallTime_median
-master.Master.ProcessCallTime_min
-master.Master.ProcessCallTime_num_ops
-master.Master.QueueCallTime_75th_percentile
-master.Master.QueueCallTime_95th_percentile
-master.Master.QueueCallTime_99th_percentile
-master.Master.QueueCallTime_max
-master.Master.QueueCallTime_mean
-regionserver.Server.blockCacheCountHitPercent
-master.Master.QueueCallTime_median
-master.Master.QueueCallTime_min
-master.Master.QueueCallTime_num_ops
-master.Master.TotalCallTime_75th_percentile
-master.Master.TotalCallTime_95th_percentile
-master.Master.TotalCallTime_99th_percentile
-master.Master.TotalCallTime_max
-master.Master.TotalCallTime_mean
-master.Master.TotalCallTime_median
-master.Master.TotalCallTime_min
-master.Master.TotalCallTime_num_ops
-master.Master.authenticationFailures
-master.Master.authenticationSuccesses
-master.Master.authorizationFailures
-master.Master.authorizationSuccesses
-master.Master.exceptions
-master.Master.exceptions.FailedSanityCheckException
-master.Master.exceptions.NotServingRegionException
-master.Master.exceptions.OutOfOrderScannerNextException
-master.Master.exceptions.RegionMovedException
-master.Master.exceptions.RegionTooBusyException
-master.Master.exceptions.UnknownScannerException
-master.Master.numActiveHandler
-master.Master.numCallsInGeneralQueue
-master.Master.numCallsInPriorityQueue
-master.Master.numCallsInReplicationQueue
-regionserver.Server.blockCacheSize
-master.Master.numOpenConnections
-master.Master.queueSize
-master.Master.receivedBytes
-master.Master.sentBytes
-master.Server.averageLoad
-master.Server.clusterRequests
-master.Server.masterActiveTime
-master.Server.numDeadRegionServers
-master.Server.masterStartTime
-master.Server.numRegionServers
-metricssystem.MetricsSystem.DroppedPubAll
-regionserver.Server.SplitTime_min
-regionserver.Server.blockCacheHitCount
-metricssystem.MetricsSystem.NumActiveSinks
-metricssystem.MetricsSystem.NumActiveSources
-metricssystem.MetricsSystem.NumAllSinks
-metricssystem.MetricsSystem.NumAllSources
-regionserver.Server.blockCacheExpressHitPercent
-metricssystem.MetricsSystem.PublishAvgTime
-metricssystem.MetricsSystem.PublishNumOps
-metricssystem.MetricsSystem.Sink_timelineAvgTime
-regionserver.Server.SplitTime_num_ops
-metricssystem.MetricsSystem.Sink_timelineDropped
-metricssystem.MetricsSystem.Sink_timelineNumOps
-regionserver.Server.SplitTime_max
-regionserver.Server.ScanNext_min
-metricssystem.MetricsSystem.Sink_timelineQsize
-metricssystem.MetricsSystem.SnapshotAvgTime
-metricssystem.MetricsSystem.SnapshotNumOps
-regionserver.Server.SplitTime_95th_percentile
-regionserver.Server.SplitTime_99th_percentile
-regionserver.RegionServer.ProcessCallTime_75th_percentile
-regionserver.RegionServer.ProcessCallTime_99th_percentile
-regionserver.RegionServer.ProcessCallTime_max
-regionserver.RegionServer.ProcessCallTime_min
-regionserver.RegionServer.QueueCallTime_99th_percentile
-regionserver.RegionServer.QueueCallTime_min
-regionserver.RegionServer.TotalCallTime_99th_percentile
-regionserver.RegionServer.TotalCallTime_max
-regionserver.RegionServer.TotalCallTime_min
-regionserver.RegionServer.authorizationFailures
-regionserver.RegionServer.exceptions.NotServingRegionException
-regionserver.RegionServer.exceptions.UnknownScannerException
-regionserver.RegionServer.numActiveHandler
-regionserver.RegionServer.numCallsInGeneralQueue
-regionserver.Server.ScanNext_95th_percentile
-regionserver.RegionServer.numCallsInReplicationQueue
-regionserver.RegionServer.sentBytes
-regionserver.Server.Append_75th_percentile
-regionserver.Server.Append_99th_percentile
-regionserver.Server.Append_max
-regionserver.Server.Append_mean
-regionserver.Server.Append_min
-regionserver.Server.Delete_75th_percentile
-regionserver.Server.Delete_99th_percentile
-regionserver.Server.Delete_max
-regionserver.Server.Delete_mean
-regionserver.Server.Delete_min
-regionserver.Server.FlushTime_75th_percentile
-regionserver.Server.FlushTime_mean
-regionserver.Server.FlushTime_median
-regionserver.Server.Get_75th_percentile
-regionserver.Server.Get_99th_percentile
-regionserver.Server.Get_mean
-regionserver.Server.Get_min
-regionserver.Server.Get_num_ops
-regionserver.Server.Increment_75th_percentile
-regionserver.Server.Increment_99th_percentile
-regionserver.Server.Increment_mean
-regionserver.Server.Increment_min
-regionserver.Server.Increment_num_ops
-regionserver.Server.Mutate_75th_percentile
-regionserver.Server.Mutate_99th_percentile
-regionserver.Server.Mutate_mean
-regionserver.Server.Mutate_min
-regionserver.Server.Mutate_num_ops
-regionserver.Server.Replay_75th_percentile
-regionserver.Server.Replay_99th_percentile
-regionserver.Server.Replay_mean
-regionserver.Server.Replay_min
-regionserver.Server.ScanNext_75th_percentile
-regionserver.Server.ScanNext_mean
-regionserver.Server.SplitTime_75th_percentile
-jvm.JvmMetrics.GcCount
-regionserver.Server.SplitTime_mean
-regionserver.Server.Replay_max
-regionserver.Server.blockCacheCount
-regionserver.Server.blockCacheEvictionCount
-regionserver.Server.blockCacheFreeSize
-regionserver.Server.blockCacheMissCount
-regionserver.Server.Replay_median
-regionserver.Server.blockedRequestCount
-regionserver.Server.compactedCellsCount
-regionserver.Server.compactionQueueLength
-regionserver.Server.flushedCellsSize
-regionserver.Server.memStoreSize
-regionserver.Server.mutationsWithoutWALSize
-jvm.JvmMetrics.GcCountConcurrentMarkSweep
-regionserver.Server.regionServerStartTime
-regionserver.Server.slowDeleteCount
-regionserver.Server.slowIncrementCount
-regionserver.Server.splitRequestCount
-regionserver.Server.splitSuccessCount
-regionserver.Server.staticBloomSize
-regionserver.Server.storeCount
-regionserver.Server.storeFileIndexSize
-regionserver.Server.totalRequestCount
-regionserver.Server.writeRequestCount
-regionserver.WAL.AppendSize_95th_percentile
-regionserver.WAL.AppendSize_max
-regionserver.WAL.AppendSize_median
-regionserver.Server.Replay_95th_percentile
-regionserver.WAL.AppendTime_95th_percentile
-regionserver.WAL.AppendTime_median
-regionserver.WAL.AppendTime_max
-jvm.JvmMetrics.GcCountParNew
-regionserver.WAL.AppendTime_mean
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/HDFS.txt b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/HDFS.txt
deleted file mode 100755
index 84576e950e..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/HDFS.txt
+++ /dev/null
@@ -1,277 +0,0 @@
-FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.CacheCapacity
-FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.CacheUsed
-FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity
-FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.DfsUsed
-FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.EstimatedCapacityLostTotal
-FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.LastVolumeFailureDate
-FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumBlocksCached
-FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumBlocksFailedToCache
-FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumBlocksFailedToUnCache
-FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes
-FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining
-default.StartupProgress.ElapsedTime
-default.StartupProgress.LoadingEditsCount
-default.StartupProgress.LoadingEditsElapsedTime
-default.StartupProgress.LoadingEditsPercentComplete
-default.StartupProgress.LoadingEditsTotal
-default.StartupProgress.LoadingFsImageCount
-default.StartupProgress.LoadingFsImageElapsedTime
-default.StartupProgress.LoadingFsImagePercentComplete
-default.StartupProgress.LoadingFsImageTotal
-default.StartupProgress.PercentComplete
-default.StartupProgress.SafeModeCount
-default.StartupProgress.SafeModeElapsedTime
-default.StartupProgress.SafeModePercentComplete
-default.StartupProgress.SafeModeTotal
-default.StartupProgress.SavingCheckpointCount
-default.StartupProgress.SavingCheckpointElapsedTime
-default.StartupProgress.SavingCheckpointPercentComplete
-default.StartupProgress.SavingCheckpointTotal
-dfs.FSNamesystem.BlockCapacity
-dfs.FSNamesystem.BlocksTotal
-dfs.FSNamesystem.CapacityRemaining
-dfs.FSNamesystem.CapacityRemainingGB
-dfs.FSNamesystem.CapacityTotal
-dfs.FSNamesystem.CapacityTotalGB
-dfs.FSNamesystem.CapacityUsed
-dfs.FSNamesystem.CapacityUsedGB
-dfs.FSNamesystem.CapacityUsedNonDFS
-dfs.FSNamesystem.CorruptBlocks
-dfs.FSNamesystem.ExcessBlocks
-dfs.FSNamesystem.ExpiredHeartbeats
-dfs.FSNamesystem.FilesTotal
-dfs.FSNamesystem.LastCheckpointTime
-dfs.FSNamesystem.LastWrittenTransactionId
-dfs.FSNamesystem.LockQueueLength
-dfs.FSNamesystem.MillisSinceLastLoadedEdits
-dfs.FSNamesystem.MissingBlocks
-dfs.FSNamesystem.MissingReplOneBlocks
-dfs.FSNamesystem.NumActiveClients
-dfs.FSNamesystem.NumFilesUnderConstruction
-dfs.FSNamesystem.PendingDataNodeMessageCount
-dfs.FSNamesystem.PendingDeletionBlocks
-dfs.FSNamesystem.PendingReplicationBlocks
-dfs.FSNamesystem.PostponedMisreplicatedBlocks
-dfs.FSNamesystem.ScheduledReplicationBlocks
-dfs.FSNamesystem.Snapshots
-dfs.FSNamesystem.SnapshottableDirectories
-dfs.FSNamesystem.StaleDataNodes
-dfs.FSNamesystem.TotalFiles
-dfs.FSNamesystem.TotalLoad
-dfs.FSNamesystem.TotalSyncCount
-dfs.FSNamesystem.TransactionsSinceLastCheckpoint
-dfs.FSNamesystem.TransactionsSinceLastLogRoll
-dfs.FSNamesystem.UnderReplicatedBlocks
-dfs.datanode.BlockChecksumOpAvgTime
-dfs.datanode.BlockChecksumOpNumOps
-dfs.datanode.BlockReportsAvgTime
-dfs.datanode.BlockReportsNumOps
-dfs.datanode.BlockVerificationFailures
-dfs.datanode.BlocksCached
-dfs.datanode.BlocksGetLocalPathInfo
-dfs.datanode.BlocksRead
-dfs.datanode.BlocksRemoved
-dfs.datanode.BlocksReplicated
-dfs.datanode.BlocksUncached
-dfs.datanode.BlocksVerified
-dfs.datanode.BlocksWritten
-dfs.datanode.BytesRead
-dfs.datanode.BytesWritten
-dfs.datanode.CacheReportsAvgTime
-dfs.datanode.CacheReportsNumOps
-dfs.datanode.CopyBlockOpAvgTime
-dfs.datanode.CopyBlockOpNumOps
-dfs.datanode.DatanodeNetworkErrors
-dfs.datanode.FlushNanosAvgTime
-dfs.datanode.FlushNanosNumOps
-dfs.datanode.FsyncCount
-dfs.datanode.FsyncNanosAvgTime
-dfs.datanode.FsyncNanosNumOps
-dfs.datanode.HeartbeatsAvgTime
-dfs.datanode.HeartbeatsNumOps
-dfs.datanode.IncrementalBlockReportsAvgTime
-dfs.datanode.IncrementalBlockReportsNumOps
-dfs.datanode.PacketAckRoundTripTimeNanosAvgTime
-dfs.datanode.PacketAckRoundTripTimeNanosNumOps
-dfs.datanode.RamDiskBlocksDeletedBeforeLazyPersisted
-dfs.datanode.RamDiskBlocksEvicted
-dfs.datanode.RamDiskBlocksEvictedWithoutRead
-dfs.datanode.RamDiskBlocksEvictionWindowMsAvgTime
-dfs.datanode.RamDiskBlocksEvictionWindowMsNumOps
-dfs.datanode.RamDiskBlocksLazyPersistWindowMsAvgTime
-dfs.datanode.RamDiskBlocksLazyPersistWindowMsNumOps
-dfs.datanode.RamDiskBlocksLazyPersisted
-dfs.datanode.RamDiskBlocksReadHits
-dfs.datanode.RamDiskBlocksWrite
-dfs.datanode.RamDiskBlocksWriteFallback
-dfs.datanode.RamDiskBytesLazyPersisted
-dfs.datanode.RamDiskBytesWrite
-dfs.datanode.ReadBlockOpAvgTime
-dfs.datanode.ReadBlockOpNumOps
-dfs.datanode.ReadsFromLocalClient
-dfs.datanode.ReadsFromRemoteClient
-dfs.datanode.RemoteBytesRead
-dfs.datanode.RemoteBytesWritten
-dfs.datanode.ReplaceBlockOpAvgTime
-dfs.datanode.ReplaceBlockOpNumOps
-dfs.datanode.SendDataPacketBlockedOnNetworkNanosAvgTime
-dfs.datanode.SendDataPacketBlockedOnNetworkNanosNumOps
-dfs.datanode.SendDataPacketTransferNanosAvgTime
-dfs.datanode.SendDataPacketTransferNanosNumOps
-dfs.datanode.TotalReadTime
-dfs.datanode.TotalWriteTime
-dfs.datanode.VolumeFailures
-dfs.datanode.WriteBlockOpAvgTime
-dfs.datanode.WriteBlockOpNumOps
-dfs.datanode.WritesFromLocalClient
-dfs.datanode.WritesFromRemoteClient
-dfs.namenode.AddBlockOps
-dfs.namenode.AllowSnapshotOps
-dfs.namenode.BlockReceivedAndDeletedOps
-dfs.namenode.BlockReportAvgTime
-dfs.namenode.BlockReportNumOps
-dfs.namenode.CacheReportAvgTime
-dfs.namenode.CacheReportNumOps
-dfs.namenode.CreateFileOps
-dfs.namenode.CreateSnapshotOps
-dfs.namenode.CreateSymlinkOps
-dfs.namenode.DeleteFileOps
-dfs.namenode.DeleteSnapshotOps
-dfs.namenode.DisallowSnapshotOps
-dfs.namenode.FileInfoOps
-dfs.namenode.FilesAppended
-dfs.namenode.FilesCreated
-dfs.namenode.FilesDeleted
-dfs.namenode.FilesInGetListingOps
-dfs.namenode.FilesRenamed
-dfs.namenode.FilesTruncated
-dfs.namenode.FsImageLoadTime
-dfs.namenode.GetAdditionalDatanodeOps
-dfs.namenode.GetBlockLocations
-dfs.namenode.GetEditAvgTime
-dfs.namenode.GetEditNumOps
-dfs.namenode.GetImageAvgTime
-dfs.namenode.GetImageNumOps
-dfs.namenode.GetLinkTargetOps
-dfs.namenode.GetListingOps
-dfs.namenode.ListSnapshottableDirOps
-dfs.namenode.PutImageAvgTime
-dfs.namenode.PutImageNumOps
-dfs.namenode.RenameSnapshotOps
-dfs.namenode.SafeModeTime
-dfs.namenode.SnapshotDiffReportOps
-dfs.namenode.StorageBlockReportOps
-dfs.namenode.SyncsAvgTime
-dfs.namenode.SyncsNumOps
-dfs.namenode.TotalFileOps
-dfs.namenode.TransactionsAvgTime
-dfs.namenode.TransactionsBatchedInSync
-dfs.namenode.TransactionsNumOps
-jvm.JvmMetrics.GcCount
-jvm.JvmMetrics.GcCountConcurrentMarkSweep
-jvm.JvmMetrics.GcCountParNew
-jvm.JvmMetrics.GcNumInfoThresholdExceeded
-jvm.JvmMetrics.GcNumWarnThresholdExceeded
-jvm.JvmMetrics.GcTimeMillis
-jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep
-jvm.JvmMetrics.GcTimeMillisParNew
-jvm.JvmMetrics.GcTotalExtraSleepTime
-jvm.JvmMetrics.LogError
-jvm.JvmMetrics.LogFatal
-jvm.JvmMetrics.LogInfo
-jvm.JvmMetrics.LogWarn
-jvm.JvmMetrics.MemHeapCommittedM
-jvm.JvmMetrics.MemHeapMaxM
-jvm.JvmMetrics.MemHeapUsedM
-jvm.JvmMetrics.MemMaxM
-jvm.JvmMetrics.MemNonHeapCommittedM
-jvm.JvmMetrics.MemNonHeapMaxM
-jvm.JvmMetrics.MemNonHeapUsedM
-jvm.JvmMetrics.ThreadsBlocked
-jvm.JvmMetrics.ThreadsNew
-jvm.JvmMetrics.ThreadsRunnable
-jvm.JvmMetrics.ThreadsTerminated
-jvm.JvmMetrics.ThreadsTimedWaiting
-jvm.JvmMetrics.ThreadsWaiting
-metricssystem.MetricsSystem.DroppedPubAll
-metricssystem.MetricsSystem.NumActiveSinks
-metricssystem.MetricsSystem.NumActiveSources
-metricssystem.MetricsSystem.NumAllSinks
-metricssystem.MetricsSystem.NumAllSources
-metricssystem.MetricsSystem.PublishAvgTime
-metricssystem.MetricsSystem.PublishNumOps
-metricssystem.MetricsSystem.Sink_timelineAvgTime
-metricssystem.MetricsSystem.Sink_timelineDropped
-metricssystem.MetricsSystem.Sink_timelineNumOps
-metricssystem.MetricsSystem.Sink_timelineQsize
-metricssystem.MetricsSystem.SnapshotAvgTime
-metricssystem.MetricsSystem.SnapshotNumOps
-rpc.RetryCache.NameNodeRetryCache.CacheCleared
-rpc.RetryCache.NameNodeRetryCache.CacheHit
-rpc.RetryCache.NameNodeRetryCache.CacheUpdated
-rpc.rpc.CallQueueLength
-rpc.rpc.NumOpenConnections
-rpc.rpc.ReceivedBytes
-rpc.rpc.RpcAuthenticationFailures
-rpc.rpc.RpcAuthenticationSuccesses
-rpc.rpc.RpcAuthorizationFailures
-rpc.rpc.RpcAuthorizationSuccesses
-rpc.rpc.RpcClientBackoff
-rpc.rpc.RpcProcessingTimeAvgTime
-rpc.rpc.RpcProcessingTimeNumOps
-rpc.rpc.RpcQueueTimeAvgTime
-rpc.rpc.RpcQueueTimeNumOps
-rpc.rpc.RpcSlowCalls
-rpc.rpc.SentBytes
-rpcdetailed.rpcdetailed.AddBlockAvgTime
-rpcdetailed.rpcdetailed.AddBlockNumOps
-rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime
-rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps
-rpcdetailed.rpcdetailed.BlockReportAvgTime
-rpcdetailed.rpcdetailed.BlockReportNumOps
-rpcdetailed.rpcdetailed.CompleteAvgTime
-rpcdetailed.rpcdetailed.CompleteNumOps
-rpcdetailed.rpcdetailed.CreateAvgTime
-rpcdetailed.rpcdetailed.CreateNumOps
-rpcdetailed.rpcdetailed.DeleteAvgTime
-rpcdetailed.rpcdetailed.DeleteNumOps
-rpcdetailed.rpcdetailed.FsyncAvgTime
-rpcdetailed.rpcdetailed.FsyncNumOps
-rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime
-rpcdetailed.rpcdetailed.GetBlockLocationsNumOps
-rpcdetailed.rpcdetailed.GetFileInfoAvgTime
-rpcdetailed.rpcdetailed.GetFileInfoNumOps
-rpcdetailed.rpcdetailed.GetListingAvgTime
-rpcdetailed.rpcdetailed.GetListingNumOps
-rpcdetailed.rpcdetailed.GetServerDefaultsAvgTime
-rpcdetailed.rpcdetailed.GetServerDefaultsNumOps
-rpcdetailed.rpcdetailed.GetTransactionIdAvgTime
-rpcdetailed.rpcdetailed.GetTransactionIdNumOps
-rpcdetailed.rpcdetailed.MkdirsAvgTime
-rpcdetailed.rpcdetailed.MkdirsNumOps
-rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime
-rpcdetailed.rpcdetailed.RegisterDatanodeNumOps
-rpcdetailed.rpcdetailed.Rename2AvgTime
-rpcdetailed.rpcdetailed.Rename2NumOps
-rpcdetailed.rpcdetailed.RenameAvgTime
-rpcdetailed.rpcdetailed.RenameNumOps
-rpcdetailed.rpcdetailed.RenewLeaseAvgTime
-rpcdetailed.rpcdetailed.RenewLeaseNumOps
-rpcdetailed.rpcdetailed.SendHeartbeatAvgTime
-rpcdetailed.rpcdetailed.SendHeartbeatNumOps
-rpcdetailed.rpcdetailed.SetPermissionAvgTime
-rpcdetailed.rpcdetailed.SetPermissionNumOps
-rpcdetailed.rpcdetailed.SetReplicationAvgTime
-rpcdetailed.rpcdetailed.SetReplicationNumOps
-rpcdetailed.rpcdetailed.SetSafeModeAvgTime
-rpcdetailed.rpcdetailed.SetSafeModeNumOps
-rpcdetailed.rpcdetailed.SetTimesAvgTime
-rpcdetailed.rpcdetailed.SetTimesNumOps
-rpcdetailed.rpcdetailed.VersionRequestAvgTime
-rpcdetailed.rpcdetailed.VersionRequestNumOps
-ugi.UgiMetrics.GetGroupsAvgTime
-ugi.UgiMetrics.GetGroupsNumOps
-ugi.UgiMetrics.LoginFailureAvgTime
-ugi.UgiMetrics.LoginFailureNumOps
-ugi.UgiMetrics.LoginSuccessAvgTime
-ugi.UgiMetrics.LoginSuccessNumOps
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/HOST.txt b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/HOST.txt
deleted file mode 100755
index 4b759c697c..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/HOST.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-swap_free
-write_bps
-write_bytes
-write_count
-write_time
-bytes_in
-bytes_out
-cpu_idle
-cpu_intr
-cpu_nice
-cpu_num
-cpu_sintr
-cpu_steal
-cpu_system
-cpu_user
-load_fifteen
-load_five
-load_one
-cpu_wio
-disk_free
-disk_percent
-disk_total
-mem_buffered
-mem_cached
-mem_free
-mem_shared
-mem_total
-mem_used
-disk_used
-pkts_in
-pkts_out
-proc_run
-proc_total
-read_bps
-read_bytes
-read_count
-read_time
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/KAFKA.txt b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/KAFKA.txt
deleted file mode 100755
index 1e2017cd89..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/KAFKA.txt
+++ /dev/null
@@ -1,190 +0,0 @@
-kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.98percentile
-kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.95percentile
-kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.95percentile
-kafka.network.RequestMetrics.RequestsPerSec.request.StopReplica.count
-kafka.network.RequestMetrics.RequestsPerSec.request.ControlledShutdown.1MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.ControlledShutdown.5MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.ControlledShutdown.count
-kafka.network.RequestMetrics.RequestsPerSec.request.Fetch.1MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Fetch.5MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.FetchFollower.count
-kafka.network.RequestMetrics.RequestsPerSec.request.FetchConsumer.15MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.LeaderAndIsr.count
-kafka.network.RequestMetrics.RequestsPerSec.request.LeaderAndIsr.5MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.LeaderAndIsr.meanRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Fetch.15MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Metadata.15MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Metadata.1MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Metadata.meanRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Metadata.5MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.OffsetCommit.15MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.OffsetCommit.1MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.ControlledShutdown.meanRate
-kafka.network.RequestMetrics.RequestsPerSec.request.OffsetFetch.15MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.OffsetFetch.1MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.OffsetCommit.meanRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Offsets.count
-kafka.network.RequestMetrics.RequestsPerSec.request.Produce.5MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Produce.1MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.UpdateMetadata.5MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.UpdateMetadata.1MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.StopReplica.meanRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Produce.15MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.StopReplica.5MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.StopReplica.15MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.UpdateMetadata.meanRate
-kafka.network.RequestMetrics.RequestsPerSec.request.StopReplica.1MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.UpdateMetadata.count
-kafka.network.RequestMetrics.RequestsPerSec.request.UpdateMetadata.15MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Produce.meanRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Offsets.meanRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Offsets.1MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.OffsetFetch.meanRate
-kafka.network.RequestMetrics.RequestsPerSec.request.OffsetFetch.count
-kafka.network.RequestMetrics.RequestsPerSec.request.OffsetFetch.5MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.OffsetCommit.count
-kafka.network.RequestMetrics.RequestsPerSec.request.Metadata.count
-kafka.network.RequestMetrics.RequestsPerSec.request.LeaderAndIsr.1MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.LeaderAndIsr.15MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.JoinGroup.meanRate
-kafka.network.RequestMetrics.RequestsPerSec.request.JoinGroup.count
-kafka.network.RequestMetrics.RequestsPerSec.request.JoinGroup.5MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.JoinGroup.1MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.JoinGroup.15MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Heartbeat.meanRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Heartbeat.count
-kafka.network.RequestMetrics.RequestsPerSec.request.Heartbeat.5MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Heartbeat.1MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Heartbeat.15MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.FetchFollower.meanRate
-kafka.network.RequestMetrics.RequestsPerSec.request.FetchFollower.5MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.FetchFollower.1MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.FetchFollower.15MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.FetchConsumer.meanRate
-kafka.network.RequestMetrics.RequestsPerSec.request.FetchConsumer.count
-kafka.network.RequestMetrics.RequestsPerSec.request.FetchConsumer.5MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.FetchConsumer.1MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Fetch.count
-kafka.network.RequestMetrics.RequestsPerSec.request.Offsets.5MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Offsets.15MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.OffsetCommit.5MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Fetch.meanRate
-kafka.network.RequestMetrics.RequestsPerSec.request.ControlledShutdown.15MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.ConsumerMetadata.count
-kafka.network.RequestMetrics.RequestsPerSec.request.ConsumerMetadata.15MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.ConsumerMetadata.5MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.ConsumerMetadata.1MinuteRate
-kafka.network.RequestMetrics.RequestsPerSec.request.Produce.count
-kafka.network.RequestMetrics.RequestsPerSec.request.ConsumerMetadata.meanRate
-kafka.cluster.Partition.UnderReplicated.partition.0.topic.ambari_kafka_service_check
-kafka.server.ReplicaManager.UnderReplicatedPartitions
-kafka.server.ReplicaManager.PartitionCount
-kafka.server.ReplicaManager.LeaderCount
-kafka.server.ReplicaManager.IsrShrinksPerSec.count
-kafka.server.ReplicaManager.IsrShrinksPerSec.5MinuteRate
-kafka.server.ReplicaManager.IsrShrinksPerSec.1MinuteRate
-kafka.server.ReplicaManager.IsrShrinksPerSec.15MinuteRate
-kafka.server.ReplicaManager.IsrExpandsPerSec.meanRate
-kafka.server.ReplicaManager.IsrExpandsPerSec.count
-kafka.server.ReplicaManager.IsrExpandsPerSec.5MinuteRate
-kafka.server.ReplicaManager.IsrExpandsPerSec.15MinuteRate
-kafka.server.ReplicaFetcherManager.MinFetchRate.clientId.Replica
-kafka.server.ReplicaFetcherManager.MaxLag.clientId.Replica
-kafka.server.OffsetManager.NumOffsets
-kafka.server.OffsetManager.NumGroups
-kafka.server.KafkaServer.BrokerState
-kafka.server.KafkaRequestHandlerPool.RequestHandlerAvgIdlePercent.meanRate
-kafka.server.KafkaRequestHandlerPool.RequestHandlerAvgIdlePercent.5MinuteRate
-kafka.server.KafkaRequestHandlerPool.RequestHandlerAvgIdlePercent.1MinuteRate
-kafka.server.KafkaRequestHandlerPool.RequestHandlerAvgIdlePercent.15MinuteRate
-kafka.server.BrokerTopicMetrics.TotalProduceRequestsPerSec.meanRate
-kafka.server.BrokerTopicMetrics.TotalProduceRequestsPerSec.count
-kafka.server.BrokerTopicMetrics.TotalProduceRequestsPerSec.5MinuteRate
-kafka.server.BrokerTopicMetrics.TotalProduceRequestsPerSec.1MinuteRate
-kafka.server.BrokerTopicMetrics.TotalProduceRequestsPerSec.15MinuteRate
-kafka.server.BrokerTopicMetrics.TotalFetchRequestsPerSec.meanRate
-kafka.server.BrokerTopicMetrics.TotalFetchRequestsPerSec.count
-kafka.server.BrokerTopicMetrics.TotalFetchRequestsPerSec.5MinuteRate
-kafka.server.BrokerTopicMetrics.TotalFetchRequestsPerSec.1MinuteRate
-kafka.server.BrokerTopicMetrics.TotalFetchRequestsPerSec.15MinuteRate
-kafka.server.BrokerTopicMetrics.MessagesInPerSec.count
-kafka.server.BrokerTopicMetrics.MessagesInPerSec.5MinuteRate
-kafka.server.BrokerTopicMetrics.MessagesInPerSec.1MinuteRate
-kafka.server.BrokerTopicMetrics.FailedProduceRequestsPerSec.meanRate
-kafka.server.BrokerTopicMetrics.FailedProduceRequestsPerSec.count
-kafka.server.BrokerTopicMetrics.FailedProduceRequestsPerSec.5MinuteRate
-kafka.server.BrokerTopicMetrics.FailedProduceRequestsPerSec.15MinuteRate
-kafka.server.BrokerTopicMetrics.FailedFetchRequestsPerSec.meanRate
-kafka.server.BrokerTopicMetrics.FailedFetchRequestsPerSec.count
-kafka.server.BrokerTopicMetrics.FailedFetchRequestsPerSec.5MinuteRate
-kafka.server.BrokerTopicMetrics.FailedFetchRequestsPerSec.1MinuteRate
-kafka.server.BrokerTopicMetrics.FailedFetchRequestsPerSec.15MinuteRate
-kafka.server.BrokerTopicMetrics.BytesOutPerSec.meanRate
-kafka.server.BrokerTopicMetrics.BytesOutPerSec.count
-kafka.server.BrokerTopicMetrics.BytesOutPerSec.5MinuteRate
-kafka.server.BrokerTopicMetrics.BytesOutPerSec.1MinuteRate
-kafka.server.BrokerTopicMetrics.BytesInPerSec.meanRate
-kafka.server.BrokerTopicMetrics.BytesInPerSec.count
-kafka.server.BrokerTopicMetrics.BytesInPerSec.5MinuteRate
-kafka.server.BrokerTopicMetrics.BytesInPerSec.15MinuteRate
-kafka.network.SocketServer.ResponsesBeingSent
-kafka.network.SocketServer.NetworkProcessorAvgIdlePercent.meanRate
-kafka.network.SocketServer.NetworkProcessorAvgIdlePercent.5MinuteRate
-kafka.network.SocketServer.NetworkProcessorAvgIdlePercent.1MinuteRate
-kafka.network.SocketServer.NetworkProcessorAvgIdlePercent.15MinuteRate
-kafka.network.SocketServer.IdlePercent.networkProcessor.2.meanRate
-kafka.network.SocketServer.IdlePercent.networkProcessor.2.count
-kafka.network.SocketServer.IdlePercent.networkProcessor.2.5MinuteRate
-kafka.network.SocketServer.IdlePercent.networkProcessor.2.1MinuteRate
-kafka.network.SocketServer.IdlePercent.networkProcessor.2.15MinuteRate
-kafka.network.SocketServer.IdlePercent.networkProcessor.1.meanRate
-kafka.network.SocketServer.IdlePercent.networkProcessor.1.count
-kafka.network.SocketServer.IdlePercent.networkProcessor.1.5MinuteRate
-kafka.network.SocketServer.IdlePercent.networkProcessor.1.1MinuteRate
-kafka.network.SocketServer.IdlePercent.networkProcessor.1.15MinuteRate
-kafka.network.SocketServer.IdlePercent.networkProcessor.0.meanRate
-kafka.network.SocketServer.IdlePercent.networkProcessor.0.count
-kafka.network.SocketServer.IdlePercent.networkProcessor.0.1MinuteRate
-kafka.network.SocketServer.IdlePercent.networkProcessor.0.15MinuteRate
-kafka.network.RequestChannel.ResponseQueueSize.processor.0
-kafka.network.RequestChannel.ResponseQueueSize.processor.1
-kafka.network.SocketServer.IdlePercent.networkProcessor.0.5MinuteRate
-kafka.network.SocketServer.NetworkProcessorAvgIdlePercent.count
-kafka.server.BrokerTopicMetrics.BytesInPerSec.1MinuteRate
-kafka.server.BrokerTopicMetrics.BytesOutPerSec.15MinuteRate
-kafka.server.BrokerTopicMetrics.FailedProduceRequestsPerSec.1MinuteRate
-kafka.server.BrokerTopicMetrics.MessagesInPerSec.15MinuteRate
-kafka.server.BrokerTopicMetrics.MessagesInPerSec.meanRate
-kafka.server.KafkaRequestHandlerPool.RequestHandlerAvgIdlePercent.count
-kafka.server.ReplicaManager.IsrExpandsPerSec.1MinuteRate
-kafka.server.ReplicaManager.IsrShrinksPerSec.meanRate
-kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.15MinuteRate
-kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.1MinuteRate
-kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.5MinuteRate
-kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.75percentile
-kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.95percentile
-kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.999percentile
-kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.99percentile
-kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.count
-kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.max
-kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.mean
-kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.meanRate
-kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.median
-kafka.controller.ControllerStats.LeaderElectionRateAndTimeMsstddev
-kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.15MinuteRate
-kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.1MinuteRate
-kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.count
-kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.meanRate
-kafka.controller.KafkaController.ActiveControllerCount
-kafka.controller.KafkaController.OfflinePartitionsCount
-kafka.controller.KafkaController.PreferredReplicaImbalanceCount
-kafka.log.Log.LogEndOffset.partition
-kafka.log.Log.LogStartOffset.partition
-kafka.log.Log.NumLogSegments.partition
-kafka.log.Log.Size.partition
-kafka.network.RequestChannel.RequestQueueSize
-kafka.network.RequestChannel.ResponseQueueSize
-kafka.network.RequestChannel.ResponseQueueSize.processor.2
-kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.min
-kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.5MinuteRate
-kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.98percentile
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/STORM.txt b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/STORM.txt
deleted file mode 100755
index 04bca008dc..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/STORM.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Supervisors
-Total Tasks
-Total Slots
-Used Slots
-Topologies
-Total Executors
-Free Slots
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/YARN.txt b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/YARN.txt
deleted file mode 100755
index ce04228532..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/files/service-metrics/YARN.txt
+++ /dev/null
@@ -1,178 +0,0 @@
-jvm.JvmMetrics.GcCount
-jvm.JvmMetrics.GcCountPS
-jvm.JvmMetrics.GcTimeMillis
-jvm.JvmMetrics.GcTimeMillisPS
-jvm.JvmMetrics.LogError
-jvm.JvmMetrics.LogFatal
-jvm.JvmMetrics.LogInfo
-jvm.JvmMetrics.LogWarn
-jvm.JvmMetrics.MemHeapCommittedM
-jvm.JvmMetrics.MemHeapMaxM
-jvm.JvmMetrics.MemHeapUsedM
-jvm.JvmMetrics.MemMaxM
-jvm.JvmMetrics.MemNonHeapCommittedM
-jvm.JvmMetrics.MemNonHeapMaxM
-jvm.JvmMetrics.MemNonHeapUsedM
-jvm.JvmMetrics.ThreadsBlocked
-jvm.JvmMetrics.ThreadsNew
-jvm.JvmMetrics.ThreadsRunnable
-jvm.JvmMetrics.ThreadsTerminated
-jvm.JvmMetrics.ThreadsTimedWaiting
-jvm.JvmMetrics.ThreadsWaiting
-mapred.ShuffleMetrics.ShuffleConnections
-mapred.ShuffleMetrics.ShuffleOutputBytes
-mapred.ShuffleMetrics.ShuffleOutputsFailed
-mapred.ShuffleMetrics.ShuffleOutputsOK
-metricssystem.MetricsSystem.DroppedPubAll
-metricssystem.MetricsSystem.NumActiveSinks
-metricssystem.MetricsSystem.NumActiveSources
-metricssystem.MetricsSystem.NumAllSinks
-metricssystem.MetricsSystem.NumAllSources
-metricssystem.MetricsSystem.PublishAvgTime
-metricssystem.MetricsSystem.PublishNumOps
-metricssystem.MetricsSystem.Sink_timelineAvgTime
-metricssystem.MetricsSystem.Sink_timelineDropped
-metricssystem.MetricsSystem.Sink_timelineNumOps
-metricssystem.MetricsSystem.Sink_timelineQsize
-metricssystem.MetricsSystem.SnapshotAvgTime
-metricssystem.MetricsSystem.SnapshotNumOps
-rpc.rpc.CallQueueLength
-rpc.rpc.NumOpenConnections
-rpc.rpc.ReceivedBytes
-rpc.rpc.RpcAuthenticationFailures
-rpc.rpc.RpcAuthenticationSuccesses
-rpc.rpc.RpcAuthorizationFailures
-rpc.rpc.RpcAuthorizationSuccesses
-rpc.rpc.RpcClientBackoff
-rpc.rpc.RpcProcessingTimeAvgTime
-rpc.rpc.RpcProcessingTimeNumOps
-rpc.rpc.RpcQueueTimeAvgTime
-rpc.rpc.RpcQueueTimeNumOps
-rpc.rpc.RpcSlowCalls
-rpc.rpc.SentBytes
-rpcdetailed.rpcdetailed.AllocateAvgTime
-rpcdetailed.rpcdetailed.AllocateNumOps
-rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime
-rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps
-rpcdetailed.rpcdetailed.GetApplicationReportAvgTime
-rpcdetailed.rpcdetailed.GetApplicationReportNumOps
-rpcdetailed.rpcdetailed.GetClusterMetricsAvgTime
-rpcdetailed.rpcdetailed.GetClusterMetricsNumOps
-rpcdetailed.rpcdetailed.GetClusterNodesAvgTime
-rpcdetailed.rpcdetailed.GetClusterNodesNumOps
-rpcdetailed.rpcdetailed.GetContainerStatusesAvgTime
-rpcdetailed.rpcdetailed.GetContainerStatusesNumOps
-rpcdetailed.rpcdetailed.GetNewApplicationAvgTime
-rpcdetailed.rpcdetailed.GetNewApplicationNumOps
-rpcdetailed.rpcdetailed.GetQueueInfoAvgTime
-rpcdetailed.rpcdetailed.GetQueueInfoNumOps
-rpcdetailed.rpcdetailed.GetQueueUserAclsAvgTime
-rpcdetailed.rpcdetailed.GetQueueUserAclsNumOps
-rpcdetailed.rpcdetailed.HeartbeatAvgTime
-rpcdetailed.rpcdetailed.HeartbeatNumOps
-rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime
-rpcdetailed.rpcdetailed.NodeHeartbeatNumOps
-rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime
-rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps
-rpcdetailed.rpcdetailed.RegisterNodeManagerAvgTime
-rpcdetailed.rpcdetailed.RegisterNodeManagerNumOps
-rpcdetailed.rpcdetailed.StartContainersAvgTime
-rpcdetailed.rpcdetailed.StartContainersNumOps
-rpcdetailed.rpcdetailed.StopContainersAvgTime
-rpcdetailed.rpcdetailed.StopContainersNumOps
-rpcdetailed.rpcdetailed.SubmitApplicationAvgTime
-rpcdetailed.rpcdetailed.SubmitApplicationNumOps
-ugi.UgiMetrics.GetGroupsAvgTime
-ugi.UgiMetrics.GetGroupsNumOps
-ugi.UgiMetrics.LoginFailureAvgTime
-ugi.UgiMetrics.LoginFailureNumOps
-ugi.UgiMetrics.LoginSuccessAvgTime
-ugi.UgiMetrics.LoginSuccessNumOps
-yarn.ClusterMetrics.AMLaunchDelayAvgTime
-yarn.ClusterMetrics.AMLaunchDelayNumOps
-yarn.ClusterMetrics.AMRegisterDelayAvgTime
-yarn.ClusterMetrics.AMRegisterDelayNumOps
-yarn.ClusterMetrics.NumActiveNMs
-yarn.ClusterMetrics.NumDecommissionedNMs
-yarn.ClusterMetrics.NumLostNMs
-yarn.ClusterMetrics.NumRebootedNMs
-yarn.ClusterMetrics.NumUnhealthyNMs
-yarn.NodeManagerMetrics.AllocatedContainers
-yarn.NodeManagerMetrics.AllocatedGB
-yarn.NodeManagerMetrics.AllocatedVCores
-yarn.NodeManagerMetrics.AvailableGB
-yarn.NodeManagerMetrics.AvailableVCores
-yarn.NodeManagerMetrics.BadLocalDirs
-yarn.NodeManagerMetrics.BadLogDirs
-yarn.NodeManagerMetrics.ContainerLaunchDurationAvgTime
-yarn.NodeManagerMetrics.ContainerLaunchDurationNumOps
-yarn.NodeManagerMetrics.ContainersCompleted
-yarn.NodeManagerMetrics.ContainersFailed
-yarn.NodeManagerMetrics.ContainersIniting
-yarn.NodeManagerMetrics.ContainersKilled
-yarn.NodeManagerMetrics.ContainersLaunched
-yarn.NodeManagerMetrics.ContainersRunning
-yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc
-yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc
-yarn.QueueMetrics.Queue=root.AMResourceLimitMB
-yarn.QueueMetrics.Queue=root.AMResourceLimitVCores
-yarn.QueueMetrics.Queue=root.ActiveApplications
-yarn.QueueMetrics.Queue=root.ActiveUsers
-yarn.QueueMetrics.Queue=root.AggregateContainersAllocated
-yarn.QueueMetrics.Queue=root.AggregateContainersReleased
-yarn.QueueMetrics.Queue=root.AllocatedContainers
-yarn.QueueMetrics.Queue=root.AllocatedMB
-yarn.QueueMetrics.Queue=root.AllocatedVCores
-yarn.QueueMetrics.Queue=root.AppAttemptFirstContainerAllocationDelayAvgTime
-yarn.QueueMetrics.Queue=root.AppAttemptFirstContainerAllocationDelayNumOps
-yarn.QueueMetrics.Queue=root.AppsCompleted
-yarn.QueueMetrics.Queue=root.AppsFailed
-yarn.QueueMetrics.Queue=root.AppsKilled
-yarn.QueueMetrics.Queue=root.AppsPending
-yarn.QueueMetrics.Queue=root.AppsRunning
-yarn.QueueMetrics.Queue=root.AppsSubmitted
-yarn.QueueMetrics.Queue=root.AvailableMB
-yarn.QueueMetrics.Queue=root.AvailableVCores
-yarn.QueueMetrics.Queue=root.PendingContainers
-yarn.QueueMetrics.Queue=root.PendingMB
-yarn.QueueMetrics.Queue=root.PendingVCores
-yarn.QueueMetrics.Queue=root.ReservedContainers
-yarn.QueueMetrics.Queue=root.ReservedMB
-yarn.QueueMetrics.Queue=root.ReservedVCores
-yarn.QueueMetrics.Queue=root.UsedAMResourceMB
-yarn.QueueMetrics.Queue=root.UsedAMResourceVCores
-yarn.QueueMetrics.Queue=root.default.AMResourceLimitMB
-yarn.QueueMetrics.Queue=root.default.AMResourceLimitVCores
-yarn.QueueMetrics.Queue=root.default.ActiveApplications
-yarn.QueueMetrics.Queue=root.default.ActiveUsers
-yarn.QueueMetrics.Queue=root.default.AggregateContainersAllocated
-yarn.QueueMetrics.Queue=root.default.AggregateContainersReleased
-yarn.QueueMetrics.Queue=root.default.AllocatedContainers
-yarn.QueueMetrics.Queue=root.default.AllocatedMB
-yarn.QueueMetrics.Queue=root.default.AllocatedVCores
-yarn.QueueMetrics.Queue=root.default.AppAttemptFirstContainerAllocationDelayAvgTime
-yarn.QueueMetrics.Queue=root.default.AppAttemptFirstContainerAllocationDelayNumOps
-yarn.QueueMetrics.Queue=root.default.AppsCompleted
-yarn.QueueMetrics.Queue=root.default.AppsFailed
-yarn.QueueMetrics.Queue=root.default.AppsKilled
-yarn.QueueMetrics.Queue=root.default.AppsPending
-yarn.QueueMetrics.Queue=root.default.AppsRunning
-yarn.QueueMetrics.Queue=root.default.AppsSubmitted
-yarn.QueueMetrics.Queue=root.default.AvailableMB
-yarn.QueueMetrics.Queue=root.default.AvailableVCores
-yarn.QueueMetrics.Queue=root.default.PendingContainers
-yarn.QueueMetrics.Queue=root.default.PendingMB
-yarn.QueueMetrics.Queue=root.default.PendingVCores
-yarn.QueueMetrics.Queue=root.default.ReservedContainers
-yarn.QueueMetrics.Queue=root.default.ReservedMB
-yarn.QueueMetrics.Queue=root.default.ReservedVCores
-yarn.QueueMetrics.Queue=root.default.UsedAMResourceMB
-yarn.QueueMetrics.Queue=root.default.UsedAMResourceVCores
-yarn.QueueMetrics.Queue=root.default.running_0
-yarn.QueueMetrics.Queue=root.default.running_1440
-yarn.QueueMetrics.Queue=root.default.running_300
-yarn.QueueMetrics.Queue=root.default.running_60
-yarn.QueueMetrics.Queue=root.running_0
-yarn.QueueMetrics.Queue=root.running_1440
-yarn.QueueMetrics.Queue=root.running_300
-yarn.QueueMetrics.Queue=root.running_60
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/__init__.py
deleted file mode 100755
index 5561e1087b..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/ams.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/ams.py
deleted file mode 100755
index 7b1a8249de..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/ams.py
+++ /dev/null
@@ -1,388 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons.str_utils import compress_backslashes
-import glob
-import os
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def ams(name=None):
- import params
- if name == 'collector':
- if not check_windows_service_exists(params.ams_collector_win_service_name):
- Execute(format("cmd /C cd {ams_collector_home_dir} & ambari-metrics-collector.cmd setup"))
-
- Directory(params.ams_collector_conf_dir,
- owner=params.ams_user,
- create_parents = True
- )
-
- Directory(params.ams_checkpoint_dir,
- owner=params.ams_user,
- create_parents = True
- )
-
- XmlConfig("ams-site.xml",
- conf_dir=params.ams_collector_conf_dir,
- configurations=params.config['configurations']['ams-site'],
- configuration_attributes=params.config['configuration_attributes']['ams-site'],
- owner=params.ams_user,
- )
-
- merged_ams_hbase_site = {}
- merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-site'])
- if params.security_enabled:
- merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-security-site'])
-
- XmlConfig( "hbase-site.xml",
- conf_dir = params.ams_collector_conf_dir,
- configurations = merged_ams_hbase_site,
- configuration_attributes=params.config['configuration_attributes']['ams-hbase-site'],
- owner = params.ams_user,
- )
-
- if (params.log4j_props != None):
- File(os.path.join(params.ams_collector_conf_dir, "log4j.properties"),
- owner=params.ams_user,
- content=params.log4j_props
- )
-
- File(os.path.join(params.ams_collector_conf_dir, "ams-env.cmd"),
- owner=params.ams_user,
- content=InlineTemplate(params.ams_env_sh_template)
- )
-
- ServiceConfig(params.ams_collector_win_service_name,
- action="change_user",
- username = params.ams_user,
- password = Script.get_password(params.ams_user))
-
- if not params.is_local_fs_rootdir:
- # Configuration needed to support NN HA
- XmlConfig("hdfs-site.xml",
- conf_dir=params.ams_collector_conf_dir,
- configurations=params.config['configurations']['hdfs-site'],
- configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
- owner=params.ams_user,
- group=params.user_group,
- mode=0644
- )
-
- XmlConfig("hdfs-site.xml",
- conf_dir=params.hbase_conf_dir,
- configurations=params.config['configurations']['hdfs-site'],
- configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
- owner=params.ams_user,
- group=params.user_group,
- mode=0644
- )
-
- XmlConfig("core-site.xml",
- conf_dir=params.ams_collector_conf_dir,
- configurations=params.config['configurations']['core-site'],
- configuration_attributes=params.config['configuration_attributes']['core-site'],
- owner=params.ams_user,
- group=params.user_group,
- mode=0644
- )
-
- XmlConfig("core-site.xml",
- conf_dir=params.hbase_conf_dir,
- configurations=params.config['configurations']['core-site'],
- configuration_attributes=params.config['configuration_attributes']['core-site'],
- owner=params.ams_user,
- group=params.user_group,
- mode=0644
- )
-
- else:
- ServiceConfig(params.ams_embedded_hbase_win_service_name,
- action="change_user",
- username = params.ams_user,
- password = Script.get_password(params.ams_user))
- # creating symbolic links on ams jars to make them available to services
- links_pairs = [
- ("%COLLECTOR_HOME%\\hbase\\lib\\ambari-metrics-hadoop-sink-with-common.jar",
- "%SINK_HOME%\\hadoop-sink\\ambari-metrics-hadoop-sink-with-common-*.jar"),
- ]
- for link_pair in links_pairs:
- link, target = link_pair
- real_link = os.path.expandvars(link)
- target = compress_backslashes(glob.glob(os.path.expandvars(target))[0])
- if not os.path.exists(real_link):
- #TODO check the symlink destination too. Broken in Python 2.x on Windows.
- Execute('cmd /c mklink "{0}" "{1}"'.format(real_link, target))
- pass
-
- elif name == 'monitor':
- if not check_windows_service_exists(params.ams_monitor_win_service_name):
- Execute(format("cmd /C cd {ams_monitor_home_dir} & ambari-metrics-monitor.cmd setup"))
-
- # creating symbolic links on ams jars to make them available to services
- links_pairs = [
- ("%HADOOP_HOME%\\share\\hadoop\\common\\lib\\ambari-metrics-hadoop-sink-with-common.jar",
- "%SINK_HOME%\\hadoop-sink\\ambari-metrics-hadoop-sink-with-common-*.jar"),
- ("%HBASE_HOME%\\lib\\ambari-metrics-hadoop-sink-with-common.jar",
- "%SINK_HOME%\\hadoop-sink\\ambari-metrics-hadoop-sink-with-common-*.jar"),
- ]
- for link_pair in links_pairs:
- link, target = link_pair
- real_link = os.path.expandvars(link)
- target = compress_backslashes(glob.glob(os.path.expandvars(target))[0])
- if not os.path.exists(real_link):
- #TODO check the symlink destination too. Broken in Python 2.x on Windows.
- Execute('cmd /c mklink "{0}" "{1}"'.format(real_link, target))
-
- Directory(params.ams_monitor_conf_dir,
- owner=params.ams_user,
- create_parents = True
- )
-
- TemplateConfig(
- os.path.join(params.ams_monitor_conf_dir, "metric_monitor.ini"),
- owner=params.ams_user,
- template_tag=None
- )
-
- TemplateConfig(
- os.path.join(params.ams_monitor_conf_dir, "metric_groups.conf"),
- owner=params.ams_user,
- template_tag=None
- )
-
- ServiceConfig(params.ams_monitor_win_service_name,
- action="change_user",
- username = params.ams_user,
- password = Script.get_password(params.ams_user))
-
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def ams(name=None):
- import params
-
- if name == 'collector':
- Directory(params.ams_collector_conf_dir,
- owner=params.ams_user,
- group=params.user_group,
- create_parents = True
- )
-
- Execute(('chown', '-R', params.ams_user, params.ams_collector_conf_dir),
- sudo=True
- )
-
- Directory(params.ams_checkpoint_dir,
- owner=params.ams_user,
- group=params.user_group,
- cd_access="a",
- create_parents = True
- )
-
- Execute(('chown', '-R', params.ams_user, params.ams_checkpoint_dir),
- sudo=True
- )
-
- XmlConfig("ams-site.xml",
- conf_dir=params.ams_collector_conf_dir,
- configurations=params.config['configurations']['ams-site'],
- configuration_attributes=params.config['configuration_attributes']['ams-site'],
- owner=params.ams_user,
- group=params.user_group
- )
-
- merged_ams_hbase_site = {}
- merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-site'])
- if params.security_enabled:
- merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-security-site'])
-
- # Add phoenix client side overrides
- merged_ams_hbase_site['phoenix.query.maxGlobalMemoryPercentage'] = str(params.phoenix_max_global_mem_percent)
- merged_ams_hbase_site['phoenix.spool.directory'] = params.phoenix_client_spool_dir
-
- XmlConfig( "hbase-site.xml",
- conf_dir = params.ams_collector_conf_dir,
- configurations = merged_ams_hbase_site,
- configuration_attributes=params.config['configuration_attributes']['ams-hbase-site'],
- owner = params.ams_user,
- group = params.user_group
- )
-
- if params.security_enabled:
- TemplateConfig(os.path.join(params.hbase_conf_dir, "ams_collector_jaas.conf"),
- owner = params.ams_user,
- template_tag = None)
-
- if (params.log4j_props != None):
- File(format("{params.ams_collector_conf_dir}/log4j.properties"),
- mode=0644,
- group=params.user_group,
- owner=params.ams_user,
- content=params.log4j_props
- )
-
- File(format("{ams_collector_conf_dir}/ams-env.sh"),
- owner=params.ams_user,
- content=InlineTemplate(params.ams_env_sh_template)
- )
-
- Directory(params.ams_collector_log_dir,
- owner=params.ams_user,
- group=params.user_group,
- cd_access="a",
- create_parents = True,
- mode=0755,
- )
-
- Directory(params.ams_collector_pid_dir,
- owner=params.ams_user,
- group=params.user_group,
- cd_access="a",
- create_parents = True,
- mode=0755,
- )
-
- # Hack to allow native HBase libs to be included for embedded hbase
- File(os.path.join(params.ams_hbase_home_dir, "bin", "hadoop"),
- owner=params.ams_user,
- mode=0755
- )
-
- # On some OS this folder could be not exists, so we will create it before pushing there files
- Directory(params.limits_conf_dir,
- create_parents = True,
- owner='root',
- group='root'
- )
-
- # Setting up security limits
- File(os.path.join(params.limits_conf_dir, 'ams.conf'),
- owner='root',
- group='root',
- mode=0644,
- content=Template("ams.conf.j2")
- )
-
- # Phoenix spool file dir if not /tmp
- if not os.path.exists(params.phoenix_client_spool_dir):
- Directory(params.phoenix_client_spool_dir,
- owner=params.ams_user,
- mode = 0755,
- group=params.user_group,
- cd_access="a",
- create_parents = True
- )
- pass
-
- if not params.is_local_fs_rootdir and params.is_ams_distributed:
- # Configuration needed to support NN HA
- XmlConfig("hdfs-site.xml",
- conf_dir=params.ams_collector_conf_dir,
- configurations=params.config['configurations']['hdfs-site'],
- configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
- owner=params.ams_user,
- group=params.user_group,
- mode=0644
- )
-
- XmlConfig("hdfs-site.xml",
- conf_dir=params.hbase_conf_dir,
- configurations=params.config['configurations']['hdfs-site'],
- configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
- owner=params.ams_user,
- group=params.user_group,
- mode=0644
- )
-
- XmlConfig("core-site.xml",
- conf_dir=params.ams_collector_conf_dir,
- configurations=params.config['configurations']['core-site'],
- configuration_attributes=params.config['configuration_attributes']['core-site'],
- owner=params.ams_user,
- group=params.user_group,
- mode=0644
- )
-
- XmlConfig("core-site.xml",
- conf_dir=params.hbase_conf_dir,
- configurations=params.config['configurations']['core-site'],
- configuration_attributes=params.config['configuration_attributes']['core-site'],
- owner=params.ams_user,
- group=params.user_group,
- mode=0644
- )
-
- pass
-
- elif name == 'monitor':
- Directory(params.ams_monitor_conf_dir,
- owner=params.ams_user,
- group=params.user_group,
- create_parents = True
- )
-
- Directory(params.ams_monitor_log_dir,
- owner=params.ams_user,
- group=params.user_group,
- mode=0755,
- create_parents = True
- )
-
- Directory(params.ams_monitor_pid_dir,
- owner=params.ams_user,
- group=params.user_group,
- mode=0755,
- create_parents = True
- )
-
- Directory(format("{ams_monitor_dir}/psutil/build"),
- owner=params.ams_user,
- group=params.user_group,
- cd_access="a",
- create_parents = True)
-
- Execute(format("{sudo} chown -R {ams_user}:{user_group} {ams_monitor_dir}")
- )
-
- TemplateConfig(
- format("{ams_monitor_conf_dir}/metric_monitor.ini"),
- owner=params.ams_user,
- group=params.user_group,
- template_tag=None
- )
-
- TemplateConfig(
- format("{ams_monitor_conf_dir}/metric_groups.conf"),
- owner=params.ams_user,
- group=params.user_group,
- template_tag=None
- )
-
- File(format("{ams_monitor_conf_dir}/ams-env.sh"),
- owner=params.ams_user,
- content=InlineTemplate(params.ams_env_sh_template)
- )
-
- # TODO
- pass
-
- pass
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/ams_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/ams_service.py
deleted file mode 100755
index 0726802c27..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/ams_service.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# !/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from hbase_service import hbase_service
-import os
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def ams_service(name, action):
- import params
- if name == 'collector':
- Service(params.ams_embedded_hbase_win_service_name, action=action)
- Service(params.ams_collector_win_service_name, action=action)
- elif name == 'monitor':
- Service(params.ams_monitor_win_service_name, action=action)
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def ams_service(name, action):
- import params
-
- if name == 'collector':
- cmd = format("{ams_collector_script} --config {ams_collector_conf_dir}")
- pid_file = format("{ams_collector_pid_dir}/ambari-metrics-collector.pid")
- #no_op_test should be much more complex to work with cumulative status of collector
- #removing as startup script handle it also
- #no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-
- if params.is_hbase_distributed:
- hbase_service('zookeeper', action=action)
- hbase_service('master', action=action)
- hbase_service('regionserver', action=action)
- cmd = format("{cmd} --distributed")
-
- if action == 'start':
- Execute(format("{sudo} rm -rf {hbase_tmp_dir}/*.tmp")
- )
-
- if not params.is_hbase_distributed and os.path.exists(format("{zookeeper_data_dir}")):
- Directory(params.zookeeper_data_dir,
- action='delete'
- )
-
-
- if params.security_enabled:
- kinit_cmd = format("{kinit_path_local} -kt {ams_collector_keytab_path} {ams_collector_jaas_princ};")
- daemon_cmd = format("{kinit_cmd} {cmd} start")
- else:
- daemon_cmd = format("{cmd} start")
-
- Execute(daemon_cmd,
- user=params.ams_user
- )
-
- pass
- elif action == 'stop':
- daemon_cmd = format("{cmd} stop")
- Execute(daemon_cmd,
- user=params.ams_user
- )
-
- pass
- pass
- elif name == 'monitor':
- cmd = format("{ams_monitor_script} --config {ams_monitor_conf_dir}")
- pid_file = format("{ams_monitor_pid_dir}/ambari-metrics-monitor.pid")
- no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-
- if action == 'start':
- daemon_cmd = format("{cmd} start")
- Execute(daemon_cmd,
- user=params.ams_user
- )
-
- pass
- elif action == 'stop':
-
- daemon_cmd = format("{cmd} stop")
- Execute(daemon_cmd,
- user=params.ams_user
- )
-
- pass
- pass
- pass
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/functions.py
deleted file mode 100755
index 140c24c727..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/functions.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import re
-import math
-import datetime
-
-from resource_management.core.shell import checked_call
-
-def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
- """
- @param heapsize: str (e.g 1000m)
- @param xmn_percent: float (e.g 0.2)
- @param xmn_max: integer (e.g 512)
- """
- heapsize = int(re.search('\d+', str(heapsize_str)).group(0))
- heapsize_unit = re.search('\D+', str(heapsize_str)).group(0)
-
- xmn_val = int(math.floor(heapsize*xmn_percent))
- xmn_val -= xmn_val % 8
-
- result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
- return str(result_xmn_val) + heapsize_unit
-
-def trim_heap_property(property, m_suffix = "m"):
- if property and property.endswith(m_suffix):
- property = property[:-1]
- return property
-
-def check_append_heap_property(property, m_suffix = "m"):
- if property and not property.endswith(m_suffix):
- property += m_suffix
- return property \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/hbase.py
deleted file mode 100755
index 16d741fe0b..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/hbase.py
+++ /dev/null
@@ -1,267 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-from ambari_commons import OSConst
-from resource_management import *
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def hbase(name=None, action = None):
- import params
- Directory(params.hbase_conf_dir,
- owner = params.hadoop_user,
- create_parents = True
- )
- Directory(params.hbase_tmp_dir,
- create_parents = True,
- owner = params.hadoop_user
- )
-
- Directory (os.path.join(params.local_dir, "jars"),
- owner = params.hadoop_user,
- create_parents = True
- )
-
- XmlConfig("hbase-site.xml",
- conf_dir = params.hbase_conf_dir,
- configurations = params.config['configurations']['ams-hbase-site'],
- configuration_attributes=params.config['configuration_attributes']['ams-hbase-site'],
- owner = params.hadoop_user
- )
-
- if 'ams-hbase-policy' in params.config['configurations']:
- XmlConfig("hbase-policy.xml",
- conf_dir = params.hbase_conf_dir,
- configurations = params.config['configurations']['ams-hbase-policy'],
- configuration_attributes=params.config['configuration_attributes']['ams-hbase-policy'],
- owner = params.hadoop_user
- )
- # Manually overriding ownership of file installed by hadoop package
- else:
- File(os.path.join(params.hbase_conf_dir, "hbase-policy.xml"),
- owner = params.hadoop_user
- )
-
- # Metrics properties
- File(os.path.join(params.hbase_conf_dir, "hadoop-metrics2-hbase.properties"),
- owner = params.hbase_user,
- content=Template("hadoop-metrics2-hbase.properties.j2")
- )
-
- hbase_TemplateConfig('regionservers', user=params.hadoop_user)
-
- if params.security_enabled:
- hbase_TemplateConfig(format("hbase_{name}_jaas.conf"), user=params.hadoop_user)
-
- if name != "client":
- Directory (params.hbase_log_dir,
- owner = params.hadoop_user,
- create_parents = True
- )
-
- if (params.hbase_log4j_props != None):
- File(os.path.join(params.hbase_conf_dir, "log4j.properties"),
- owner=params.hadoop_user,
- content=params.hbase_log4j_props
- )
- elif (os.path.exists(os.path.join(params.hbase_conf_dir,"log4j.properties"))):
- File(os.path.join(params.hbase_conf_dir,"log4j.properties"),
- owner=params.hadoop_user
- )
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def hbase(name=None # 'master' or 'regionserver' or 'client'
- , action=None):
- import params
-
- Directory(params.hbase_conf_dir,
- owner = params.hbase_user,
- group = params.user_group,
- create_parents = True
- )
-
- Execute(('chown', '-R', params.hbase_user, params.hbase_conf_dir),
- sudo=True
- )
-
- Directory (params.hbase_tmp_dir,
- owner = params.hbase_user,
- cd_access="a",
- create_parents = True
- )
-
- Execute(('chown', '-R', params.hbase_user, params.hbase_tmp_dir),
- sudo=True
- )
-
- Directory (os.path.join(params.local_dir, "jars"),
- owner = params.hbase_user,
- group = params.user_group,
- cd_access="a",
- mode=0775,
- create_parents = True
- )
-
- merged_ams_hbase_site = {}
- merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-site'])
- if params.security_enabled:
- merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-security-site'])
-
- XmlConfig("hbase-site.xml",
- conf_dir = params.hbase_conf_dir,
- configurations = merged_ams_hbase_site,
- configuration_attributes=params.config['configuration_attributes']['ams-hbase-site'],
- owner = params.hbase_user,
- group = params.user_group
- )
-
- # Phoenix spool file dir if not /tmp
- if not os.path.exists(params.phoenix_server_spool_dir):
- Directory(params.phoenix_server_spool_dir,
- owner=params.ams_user,
- mode = 0755,
- group=params.user_group,
- cd_access="a",
- create_parents=True
- )
- pass
-
- if 'ams-hbase-policy' in params.config['configurations']:
- XmlConfig("hbase-policy.xml",
- conf_dir = params.hbase_conf_dir,
- configurations = params.config['configurations']['ams-hbase-policy'],
- configuration_attributes=params.config['configuration_attributes']['ams-hbase-policy'],
- owner = params.hbase_user,
- group = params.user_group
- )
- # Manually overriding ownership of file installed by hadoop package
- else:
- File( format("{params.hbase_conf_dir}/hbase-policy.xml"),
- owner = params.hbase_user,
- group = params.user_group
- )
-
- File(format("{hbase_conf_dir}/hbase-env.sh"),
- owner = params.hbase_user,
- content=InlineTemplate(params.hbase_env_sh_template)
- )
-
- # Metrics properties
- File(os.path.join(params.hbase_conf_dir, "hadoop-metrics2-hbase.properties"),
- owner = params.hbase_user,
- group = params.user_group,
- content=Template("hadoop-metrics2-hbase.properties.j2")
- )
-
- # hbase_TemplateConfig( params.metric_prop_file_name,
- # tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'
- # )
-
- hbase_TemplateConfig('regionservers', user=params.hbase_user)
-
- if params.security_enabled:
- hbase_TemplateConfig( format("hbase_{name}_jaas.conf"), user=params.hbase_user)
- hbase_TemplateConfig( format("hbase_client_jaas.conf"), user=params.hbase_user)
- hbase_TemplateConfig( format("ams_zookeeper_jaas.conf"), user=params.hbase_user)
-
- if name != "client":
- Directory( params.hbase_pid_dir,
- owner = params.hbase_user,
- create_parents = True,
- cd_access = "a",
- mode = 0755,
- )
-
- Directory (params.hbase_log_dir,
- owner = params.hbase_user,
- create_parents = True,
- cd_access = "a",
- mode = 0755,
- )
-
- if name == "master":
-
- if not params.is_local_fs_rootdir:
- # If executing Stop All, HDFS is probably down
- if action != 'stop':
-
- params.HdfsResource(params.hbase_root_dir,
- type="directory",
- action="create_on_execute",
- owner=params.hbase_user,
- mode=0775
- )
-
- params.HdfsResource(params.hbase_staging_dir,
- type="directory",
- action="create_on_execute",
- owner=params.hbase_user,
- mode=0711
- )
-
- params.HdfsResource(None, action="execute")
-
- if params.is_hbase_distributed:
- #Workaround for status commands not aware of operating mode
- File(format("{params.hbase_pid_dir}/distributed_mode"), action="create", mode=0644, owner=params.hbase_user)
-
- pass
-
- else:
-
- local_root_dir = params.hbase_root_dir
- #cut protocol name
- if local_root_dir.startswith("file://"):
- local_root_dir = local_root_dir[7:]
- #otherwise assume dir name is provided as is
-
- Directory(local_root_dir,
- owner = params.hbase_user,
- cd_access="a",
- create_parents = True
- )
-
- Execute(('chown', '-R', params.hbase_user, local_root_dir),
- sudo=True
- )
-
- File(format("{params.hbase_pid_dir}/distributed_mode"), action="delete", owner=params.hbase_user)
-
- if params.hbase_log4j_props is not None:
- File(format("{params.hbase_conf_dir}/log4j.properties"),
- mode=0644,
- group=params.user_group,
- owner=params.hbase_user,
- content=params.hbase_log4j_props
- )
- elif os.path.exists(format("{params.hbase_conf_dir}/log4j.properties")):
- File(format("{params.hbase_conf_dir}/log4j.properties"),
- mode=0644,
- group=params.user_group,
- owner=params.hbase_user
- )
-
-def hbase_TemplateConfig(name, tag=None, user=None):
- import params
-
- TemplateConfig( os.path.join(params.hbase_conf_dir, name),
- owner = user,
- template_tag = tag
- )
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/hbase_master.py
deleted file mode 100755
index b769a0de05..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/hbase_master.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-from hbase_service import hbase_service
-from hbase_decommission import hbase_decommission
-
-
-class HbaseMaster(Script):
- def install(self, env):
- self.install_packages(env)
-
- def configure(self, env, action = None):
- import params
- env.set_params(params)
-
- hbase('master', action)
-
- def start(self, env):
- import params
- env.set_params(params)
- self.configure(env, action = 'start') # for security
-
- hbase_service( 'master',
- action = 'start'
- )
-
- def stop(self, env):
- import params
- env.set_params(params)
-
- hbase_service( 'master',
- action = 'stop'
- )
-
- def status(self, env):
- import status_params
- env.set_params(status_params)
- pid_file = format("{pid_dir}/hbase-{hbase_user}-master.pid")
- check_process_status(pid_file)
-
- def decommission(self, env):
- import params
- env.set_params(params)
-
- hbase_decommission(env)
-
-
-if __name__ == "__main__":
- HbaseMaster().execute()
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/hbase_regionserver.py
deleted file mode 100755
index cf0efeff2c..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/hbase_regionserver.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-from hbase_service import hbase_service
-
-
-class HbaseRegionServer(Script):
- def install(self, env):
- self.install_packages(env)
-
- def configure(self, env, action = None):
- import params
- env.set_params(params)
-
- hbase('regionserver', action)
-
- def start(self, env):
- import params
- env.set_params(params)
- self.configure(env, action = 'start') # for security
-
- hbase_service( 'regionserver',
- action = 'start'
- )
-
- def stop(self, env):
- import params
- env.set_params(params)
-
- hbase_service( 'regionserver',
- action = 'stop'
- )
-
- def status(self, env):
- import status_params
- env.set_params(status_params)
- pid_file = format("{pid_dir}/hbase-{hbase_user}-regionserver.pid")
- check_process_status(pid_file)
-
- def decommission(self, env):
- print "Decommission not yet implemented!"
-
-
-if __name__ == "__main__":
- HbaseRegionServer().execute()
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/hbase_service.py
deleted file mode 100755
index 5f03ca06ee..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/hbase_service.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-def hbase_service(
- name,
- action = 'start'): # 'start' or 'stop' or 'status'
-
- import params
-
- role = name
- cmd = format("{daemon_script} --config {hbase_conf_dir}")
- pid_file = format("{hbase_pid_dir}/hbase-{hbase_user}-{role}.pid")
- no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-
- if action == 'start':
- daemon_cmd = format("{cmd} start {role}")
-
- Execute ( daemon_cmd,
- not_if = no_op_test,
- user = params.hbase_user
- )
- elif action == 'stop':
- daemon_cmd = format("{cmd} stop {role}")
-
- Execute ( daemon_cmd,
- user = params.hbase_user,
- # BUGFIX: hbase regionserver sometimes hangs when nn is in safemode
- timeout = 30,
- on_timeout = format("{no_op_test} && kill -9 `cat {pid_file}`")
- )
-
- File(pid_file,
- action = "delete",
- )
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/metrics_collector.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/metrics_collector.py
deleted file mode 100755
index cf498eccfa..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/metrics_collector.py
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions.security_commons import build_expectations, \
- cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
- FILE_TYPE_XML
-from ams import ams
-from ams_service import ams_service
-from hbase import hbase
-from status import check_service_status
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-
-class AmsCollector(Script):
- def install(self, env):
- self.install_packages(env)
-
- def configure(self, env, action = None):
- import params
- env.set_params(params)
- hbase('master', action)
- hbase('regionserver', action)
- ams(name='collector')
-
- def start(self, env):
- self.configure(env, action = 'start') # for security
- # stop hanging components before start
- ams_service('collector', action = 'stop')
- ams_service('collector', action = 'start')
-
- def stop(self, env):
- import params
- env.set_params(params)
- # Sometimes, stop() may be called before start(), in case restart() is initiated right after installation
- self.configure(env, action = 'stop') # for security
- ams_service('collector', action = 'stop')
-
- def status(self, env):
- import status_params
- env.set_params(status_params)
- check_service_status(name='collector')
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class AmsCollectorDefault(AmsCollector):
- def security_status(self, env):
- import status_params
-
- env.set_params(status_params)
- props_value_check = {"hbase.security.authentication": "kerberos",
- "hbase.security.authorization": "true"}
-
- props_empty_check = ["hbase.zookeeper.property.authProvider.1",
- "hbase.master.keytab.file",
- "hbase.master.kerberos.principal",
- "hbase.regionserver.keytab.file",
- "hbase.regionserver.kerberos.principal"
- ]
- props_read_check = ['hbase.master.keytab.file', 'hbase.regionserver.keytab.file']
- ams_hbase_site_expectations = build_expectations('hbase-site', props_value_check,
- props_empty_check,
- props_read_check)
-
- expectations = {}
- expectations.update(ams_hbase_site_expectations)
-
- security_params = get_params_from_filesystem(status_params.ams_hbase_conf_dir,
- {'hbase-site.xml': FILE_TYPE_XML})
-
- is_hbase_distributed = security_params['hbase-site']['hbase.cluster.distributed']
- # for embedded mode, when HBase is backed by file, security state is SECURED_KERBEROS by definition when cluster is secured
- if status_params.security_enabled and not is_hbase_distributed:
- self.put_structured_out({"securityState": "SECURED_KERBEROS"})
- return
-
- result_issues = validate_security_config_properties(security_params, expectations)
-
- if not result_issues: # If all validations passed successfully
- try:
- # Double check the dict before calling execute
- if ('hbase-site' not in security_params or
- 'hbase.master.keytab.file' not in security_params['hbase-site'] or
- 'hbase.master.kerberos.principal' not in security_params['hbase-site']):
- self.put_structured_out({"securityState": "UNSECURED"})
- self.put_structured_out(
- {"securityIssuesFound": "Keytab file or principal are not set property."})
- return
-
- cached_kinit_executor(status_params.kinit_path_local,
- status_params.hbase_user,
- security_params['hbase-site']['hbase.master.keytab.file'],
- security_params['hbase-site']['hbase.master.kerberos.principal'],
- status_params.hostname,
- status_params.tmp_dir)
- self.put_structured_out({"securityState": "SECURED_KERBEROS"})
- except Exception as e:
- self.put_structured_out({"securityState": "ERROR"})
- self.put_structured_out({"securityStateErrorInfo": str(e)})
- else:
- issues = []
- for cf in result_issues:
- issues.append("Configuration file %s did not pass the validation. Reason: %s" % (
- cf, result_issues[cf]))
- self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
- self.put_structured_out({"securityState": "UNSECURED"})
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class AmsCollectorWindows(AmsCollector):
- def install(self, env):
- self.install_packages(env)
- self.configure(env) # for security
-
-if __name__ == "__main__":
- AmsCollector().execute()
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/metrics_monitor.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/metrics_monitor.py
deleted file mode 100755
index f6cce8f742..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/metrics_monitor.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from ams import ams
-from ams_service import ams_service
-from status import check_service_status
-
-class AmsMonitor(Script):
- def install(self, env):
- self.install_packages(env)
- self.configure(env) # for security
-
- def configure(self, env):
- import params
- env.set_params(params)
- ams(name='monitor')
-
- def start(self, env):
- self.configure(env) # for security
-
- ams_service( 'monitor',
- action = 'start'
- )
-
- def stop(self, env):
- import params
- env.set_params(params)
-
- ams_service( 'monitor',
- action = 'stop'
- )
-
- def status(self, env):
- import status_params
- env.set_params(status_params)
- check_service_status(name='monitor')
-
-
-if __name__ == "__main__":
- AmsMonitor().execute()
-
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/params.py
deleted file mode 100755
index b0a2b75958..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/params.py
+++ /dev/null
@@ -1,257 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from functions import calc_xmn_from_xms
-from functions import check_append_heap_property
-from functions import trim_heap_property
-
-from resource_management import *
-import status_params
-from ambari_commons import OSCheck
-
-
-if OSCheck.is_windows_family():
- from params_windows import *
-else:
- from params_linux import *
-# server configurations
-config = Script.get_config()
-exec_tmp_dir = Script.get_tmp_dir()
-
-def get_combined_memory_mb(value1, value2):
- try:
- part1 = int(value1.strip()[:-1]) if value1.lower().strip()[-1:] == 'm' else int(value1)
- part2 = int(value2.strip()[:-1]) if value2.lower().strip()[-1:] == 'm' else int(value2)
- return str(part1 + part2) + 'm'
- except:
- return None
-pass
-
-#AMBARI_METRICS data
-ams_pid_dir = status_params.ams_collector_pid_dir
-
-ams_collector_script = "/usr/sbin/ambari-metrics-collector"
-ams_collector_pid_dir = status_params.ams_collector_pid_dir
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
-if 'cluster-env' in config['configurations'] and \
- 'metrics_collector_vip_host' in config['configurations']['cluster-env']:
- metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
-else:
- metric_collector_host = ams_collector_hosts[0]
-if 'cluster-env' in config['configurations'] and \
- 'metrics_collector_vip_port' in config['configurations']['cluster-env']:
- metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
-else:
- metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
- if metric_collector_web_address.find(':') != -1:
- metric_collector_port = metric_collector_web_address.split(':')[1]
- else:
- metric_collector_port = '6188'
-
-ams_collector_log_dir = config['configurations']['ams-env']['metrics_collector_log_dir']
-ams_monitor_log_dir = config['configurations']['ams-env']['metrics_monitor_log_dir']
-
-ams_monitor_dir = "/usr/lib/python2.6/site-packages/resource_monitoring"
-ams_monitor_pid_dir = status_params.ams_monitor_pid_dir
-ams_monitor_script = "/usr/sbin/ambari-metrics-monitor"
-
-ams_hbase_home_dir = "/usr/lib/ams-hbase/"
-
-ams_hbase_normalizer_enabled = default("/configurations/ams-hbase-site/hbase.normalizer.enabled", None)
-ams_hbase_fifo_compaction_enabled = default("/configurations/ams-site/timeline.metrics.hbase.fifo.compaction.enabled", None)
-
-#hadoop params
-
-hbase_excluded_hosts = config['commandParams']['excluded_hosts']
-hbase_drain_only = config['commandParams']['mark_draining_only']
-hbase_included_hosts = config['commandParams']['included_hosts']
-
-hbase_user = status_params.hbase_user
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-hbase_root_dir = config['configurations']['ams-hbase-site']['hbase.rootdir']
-hbase_pid_dir = status_params.hbase_pid_dir
-
-is_hbase_distributed = config['configurations']['ams-hbase-site']['hbase.cluster.distributed']
-is_local_fs_rootdir = hbase_root_dir.startswith('file://')
-is_ams_distributed = config['configurations']['ams-site']['timeline.metrics.service.operation.mode'] == 'distributed'
-
-# security is disabled for embedded mode, when HBase is backed by file
-security_enabled = False if not is_hbase_distributed else config['configurations']['cluster-env']['security_enabled']
-
-# this is "hadoop-metrics.properties" for 1.x stacks
-metric_prop_file_name = "hadoop-metrics2-hbase.properties"
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-java_version = int(config['hostLevelParams']['java_version'])
-
-metrics_collector_heapsize = default('/configurations/ams-env/metrics_collector_heapsize', "512")
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
-metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 60)
-
-hbase_log_dir = config['configurations']['ams-hbase-env']['hbase_log_dir']
-hbase_classpath_additional = default("/configurations/ams-hbase-env/hbase_classpath_additional", None)
-master_heapsize = config['configurations']['ams-hbase-env']['hbase_master_heapsize']
-regionserver_heapsize = config['configurations']['ams-hbase-env']['hbase_regionserver_heapsize']
-
-# Check if hbase java options already have appended "m". If Yes, remove the trailing m.
-metrics_collector_heapsize = check_append_heap_property(str(metrics_collector_heapsize), "m")
-master_heapsize = check_append_heap_property(str(master_heapsize), "m")
-regionserver_heapsize = check_append_heap_property(str(regionserver_heapsize), "m")
-
-regionserver_xmn_max = default('/configurations/ams-hbase-env/hbase_regionserver_xmn_max', None)
-if regionserver_xmn_max:
- regionserver_xmn_max = int(trim_heap_property(str(regionserver_xmn_max), "m"))
- regionserver_xmn_percent = config['configurations']['ams-hbase-env']['hbase_regionserver_xmn_ratio']
- regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
-else:
- regionserver_xmn_size = config['configurations']['ams-hbase-env']['regionserver_xmn_size']
-pass
-
-hbase_master_xmn_size = config['configurations']['ams-hbase-env']['hbase_master_xmn_size']
-hbase_master_maxperm_size = config['configurations']['ams-hbase-env']['hbase_master_maxperm_size']
-
-# Check if hbase java options already have appended "m". If Yes, remove the trailing m.
-hbase_master_maxperm_size = check_append_heap_property(str(hbase_master_maxperm_size), "m")
-hbase_master_xmn_size = check_append_heap_property(str(hbase_master_xmn_size), "m")
-regionserver_xmn_size = check_append_heap_property(str(regionserver_xmn_size), "m")
-
-# Choose heap size for embedded mode as sum of master + regionserver
-if not is_hbase_distributed:
- hbase_heapsize = get_combined_memory_mb(master_heapsize, regionserver_heapsize)
- if hbase_heapsize is None:
- hbase_heapsize = master_heapsize
-else:
- hbase_heapsize = master_heapsize
-
-max_open_files_limit = default("/configurations/ams-hbase-env/max_open_files_limit", "32768")
-
-if not is_hbase_distributed:
- zookeeper_quorum_hosts = 'localhost'
- zookeeper_clientPort = '61181'
-else:
- zookeeper_quorum_hosts = default("/hostname", 'localhost')
- if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
- zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
- else:
- zookeeper_clientPort = '2181'
-
-ams_checkpoint_dir = config['configurations']['ams-site']['timeline.metrics.aggregator.checkpoint.dir']
-hbase_pid_dir = status_params.hbase_pid_dir
-_hbase_tmp_dir = config['configurations']['ams-hbase-site']['hbase.tmp.dir']
-hbase_tmp_dir = substitute_vars(_hbase_tmp_dir, config['configurations']['ams-hbase-site'])
-_zookeeper_data_dir = config['configurations']['ams-hbase-site']['hbase.zookeeper.property.dataDir']
-zookeeper_data_dir = substitute_vars(_zookeeper_data_dir, config['configurations']['ams-hbase-site'])
-# TODO UPGRADE default, update site during upgrade
-_local_dir_conf = default('/configurations/ams-hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
-local_dir = substitute_vars(_local_dir_conf, config['configurations']['ams-hbase-site'])
-
-phoenix_max_global_mem_percent = default('/configurations/ams-site/phoenix.query.maxGlobalMemoryPercentage', '20')
-phoenix_client_spool_dir = default('/configurations/ams-site/phoenix.spool.directory', '/tmp')
-phoenix_server_spool_dir = default('/configurations/ams-hbase-site/phoenix.spool.directory', '/tmp')
-# Substitute vars if present
-phoenix_client_spool_dir = substitute_vars(phoenix_client_spool_dir, config['configurations']['ams-hbase-site'])
-phoenix_server_spool_dir = substitute_vars(phoenix_server_spool_dir, config['configurations']['ams-hbase-site'])
-
-client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
-master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
-regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
-
-rs_hosts = ["localhost"]
-
-smoke_test_user = config['configurations']['cluster-env']['smokeuser']
-smokeuser_permissions = "RWXCA"
-service_check_data = functions.get_unique_id_and_date()
-user_group = config['configurations']['cluster-env']["user_group"]
-hadoop_user = "hadoop"
-
-kinit_cmd = ""
-
-if security_enabled:
- _hostname_lowercase = config['hostname'].lower()
- client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
- smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
- hbase_user_keytab = config['configurations']['ams-hbase-env']['hbase_user_keytab']
-
- ams_collector_jaas_config_file = format("{hbase_conf_dir}/ams_collector_jaas.conf")
- ams_collector_keytab_path = config['configurations']['ams-hbase-security-site']['hbase.myclient.keytab']
- ams_collector_jaas_princ = config['configurations']['ams-hbase-security-site']['hbase.myclient.principal'].replace('_HOST',_hostname_lowercase)
-
- ams_zookeeper_jaas_config_file = format("{hbase_conf_dir}/ams_zookeeper_jaas.conf")
- ams_zookeeper_keytab = config['configurations']['ams-hbase-security-site']['ams.zookeeper.keytab']
- ams_zookeeper_principal_name = config['configurations']['ams-hbase-security-site']['ams.zookeeper.principal'].replace('_HOST',_hostname_lowercase)
-
- master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
- master_keytab_path = config['configurations']['ams-hbase-security-site']['hbase.master.keytab.file']
- master_jaas_princ = config['configurations']['ams-hbase-security-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
-
- regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
- regionserver_keytab_path = config['configurations']['ams-hbase-security-site']['hbase.regionserver.keytab.file']
- regionserver_jaas_princ = config['configurations']['ams-hbase-security-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)
-
- zk_servicename = ams_zookeeper_principal_name.rpartition('/')[0]
-
-#log4j.properties
-if (('ams-hbase-log4j' in config['configurations']) and ('content' in config['configurations']['ams-hbase-log4j'])):
- hbase_log4j_props = config['configurations']['ams-hbase-log4j']['content']
-else:
- hbase_log4j_props = None
-
-if (('ams-log4j' in config['configurations']) and ('content' in config['configurations']['ams-log4j'])):
- log4j_props = config['configurations']['ams-log4j']['content']
-else:
- log4j_props = None
-
-hbase_env_sh_template = config['configurations']['ams-hbase-env']['content']
-ams_env_sh_template = config['configurations']['ams-env']['content']
-
-hbase_staging_dir = default("/configurations/ams-hbase-site/hbase.bulkload.staging.dir", "/amshbase/staging")
-
-#for create_hdfs_directory
-hostname = config["hostname"]
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-
-
-
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-
-import functools
-#create partial functions with common arguments for every HdfsResource call
-#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
- HdfsResource,
- user=hdfs_user,
- security_enabled = security_enabled,
- keytab = hdfs_user_keytab,
- kinit_path_local = kinit_path_local,
- hadoop_bin_dir = hadoop_bin_dir,
- hadoop_conf_dir = hadoop_conf_dir,
- principal_name = hdfs_principal_name,
- hdfs_site = hdfs_site,
- default_fs = default_fs
- )
-
-
-
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/params_linux.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/params_linux.py
deleted file mode 100755
index 838e987386..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/params_linux.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions import conf_select
-from ambari_commons import OSCheck
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-
-config = Script.get_config()
-
-ams_collector_conf_dir = "/etc/ambari-metrics-collector/conf"
-ams_monitor_conf_dir = "/etc/ambari-metrics-monitor/conf/"
-ams_user = config['configurations']['ams-env']['ambari_metrics_user']
-#RPM versioning support
-rpm_version = default("/configurations/hadoop-env/rpm_version", None)
-
-#hadoop params
-if rpm_version is not None:
- #RPM versioning support
- rpm_version = default("/configurations/hadoop-env/rpm_version", None)
-
-hadoop_native_lib = format("/usr/lib/ams-hbase/lib/hadoop-native")
-hadoop_bin_dir = "/usr/bin"
-daemon_script = "/usr/lib/ams-hbase/bin/hbase-daemon.sh"
-region_mover = "/usr/lib/ams-hbase/bin/region_mover.rb"
-region_drainer = "/usr/lib/ams-hbase/bin/draining_servers.rb"
-hbase_cmd = "/usr/lib/ams-hbase/bin/hbase"
-
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hbase_conf_dir = "/etc/ams-hbase/conf"
-
-limits_conf_dir = "/etc/security/limits.d"
-sudo = AMBARI_SUDO_BINARY
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/params_windows.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/params_windows.py
deleted file mode 100755
index acb5bba8dd..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/params_windows.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management.libraries.script.script import Script
-
-
-config = Script.get_config()
-
-hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
-ams_user = hadoop_user
-
-try:
- ams_collector_conf_dir = os.environ["COLLECTOR_CONF_DIR"]
- ams_collector_home_dir = os.environ["COLLECTOR_HOME"]
- hbase_cmd = os.path.join(os.environ["COLLECTOR_HOME"], "hbase", "bin", "hbase.cmd")
- hbase_conf_dir = os.path.join(os.environ["COLLECTOR_HOME"], "hbase", "conf")
-except:
- ams_collector_conf_dir = None
- ams_collector_home_dir = None
- hbase_cmd = None
- hbase_conf_dir = None
-
-try:
- ams_monitor_conf_dir = os.environ["MONITOR_CONF_DIR"]
- ams_monitor_home_dir = os.environ["MONITOR_HOME"]
-except:
- ams_monitor_conf_dir = None
- ams_monitor_home_dir = None
-
-hadoop_native_lib = os.path.join(os.environ["HADOOP_HOME"], "bin")
-hadoop_bin_dir = os.path.join(os.environ["HADOOP_HOME"], "bin")
-hadoop_conf_dir = os.path.join(os.environ["HADOOP_HOME"], "conf")
-
-from service_mapping import *
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/service_check.py
deleted file mode 100755
index f19c823478..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/service_check.py
+++ /dev/null
@@ -1,166 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.core.logger import Logger
-from resource_management.core.base import Fail
-from resource_management import Script
-from resource_management import Template
-
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-
-import httplib
-import urllib
-import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
-import os
-import random
-import time
-import socket
-
-
-class AMSServiceCheck(Script):
- AMS_METRICS_POST_URL = "/ws/v1/timeline/metrics/"
- AMS_METRICS_GET_URL = "/ws/v1/timeline/metrics?%s"
- AMS_CONNECT_TRIES = 30
- AMS_CONNECT_TIMEOUT = 15
-
- @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
- def service_check(self, env):
- from resource_management.libraries.functions.windows_service_utils import check_windows_service_exists
- import params
-
- env.set_params(params)
-
- #Just check that the services were correctly installed
- #Check the monitor on all hosts
- Logger.info("Metrics Monitor service check was started.")
- if not check_windows_service_exists(params.ams_monitor_win_service_name):
- raise Fail("Metrics Monitor service was not properly installed. Check the logs and retry the installation.")
- #Check the collector only where installed
- if params.ams_collector_home_dir and os.path.isdir(params.ams_collector_home_dir):
- Logger.info("Metrics Collector service check was started.")
- if not check_windows_service_exists(params.ams_collector_win_service_name):
- raise Fail("Metrics Collector service was not properly installed. Check the logs and retry the installation.")
-
- @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
- def service_check(self, env):
- import params
-
- Logger.info("Ambari Metrics service check was started.")
- env.set_params(params)
-
- random_value1 = random.random()
- headers = {"Content-type": "application/json"}
-
- for i in xrange(0, self.AMS_CONNECT_TRIES):
- try:
- current_time = int(time.time()) * 1000
- metric_json = Template('smoketest_metrics.json.j2', hostname=params.hostname, random1=random_value1,
- current_time=current_time).get_content()
- Logger.info("Generated metrics:\n%s" % metric_json)
-
- Logger.info("Connecting (POST) to %s:%s%s" % (params.metric_collector_host,
- params.metric_collector_port,
- self.AMS_METRICS_POST_URL))
- conn = httplib.HTTPConnection(params.metric_collector_host,
- int(params.metric_collector_port))
- conn.request("POST", self.AMS_METRICS_POST_URL, metric_json, headers)
-
- response = conn.getresponse()
- Logger.info("Http response: %s %s" % (response.status, response.reason))
- except (httplib.HTTPException, socket.error) as ex:
- if i < self.AMS_CONNECT_TRIES - 1: #range/xrange returns items from start to end-1
- time.sleep(self.AMS_CONNECT_TIMEOUT)
- Logger.info("Connection failed. Next retry in %s seconds."
- % (self.AMS_CONNECT_TIMEOUT))
- continue
- else:
- raise Fail("Metrics were not saved. Service check has failed. "
- "\nConnection failed.")
-
- data = response.read()
- Logger.info("Http data: %s" % data)
- conn.close()
-
- if response.status == 200:
- Logger.info("Metrics were saved.")
- break
- else:
- Logger.info("Metrics were not saved. Service check has failed.")
- if i < self.AMS_CONNECT_TRIES - 1: #range/xrange returns items from start to end-1
- time.sleep(self.AMS_CONNECT_TIMEOUT)
- Logger.info("Next retry in %s seconds."
- % (self.AMS_CONNECT_TIMEOUT))
- else:
- raise Fail("Metrics were not saved. Service check has failed. POST request status: %s %s \n%s" %
- (response.status, response.reason, data))
-
- get_metrics_parameters = {
- "metricNames": "AMBARI_METRICS.SmokeTest.FakeMetric",
- "appId": "amssmoketestfake",
- "hostname": params.hostname,
- "startTime": current_time - 60000,
- "endTime": current_time + 61000,
- "precision": "seconds",
- "grouped": "false",
- }
- encoded_get_metrics_parameters = urllib.urlencode(get_metrics_parameters)
-
- Logger.info("Connecting (GET) to %s:%s%s" % (params.metric_collector_host,
- params.metric_collector_port,
- self.AMS_METRICS_GET_URL % encoded_get_metrics_parameters))
-
- conn = httplib.HTTPConnection(params.metric_collector_host,
- int(params.metric_collector_port))
- conn.request("GET", self.AMS_METRICS_GET_URL % encoded_get_metrics_parameters)
- response = conn.getresponse()
- Logger.info("Http response: %s %s" % (response.status, response.reason))
-
- data = response.read()
- Logger.info("Http data: %s" % data)
- conn.close()
-
- if response.status == 200:
- Logger.info("Metrics were retrieved.")
- else:
- Logger.info("Metrics were not retrieved. Service check has failed.")
- raise Fail("Metrics were not retrieved. Service check has failed. GET request status: %s %s \n%s" %
- (response.status, response.reason, data))
- data_json = json.loads(data)
-
- def floats_eq(f1, f2, delta):
- return abs(f1-f2) < delta
-
- for metrics_data in data_json["metrics"]:
- if (str(current_time) in metrics_data["metrics"] and str(current_time + 1000) in metrics_data["metrics"]
- and floats_eq(metrics_data["metrics"][str(current_time)], random_value1, 0.0000001)
- and floats_eq(metrics_data["metrics"][str(current_time + 1000)], current_time, 1)):
- Logger.info("Values %s and %s were found in the response." % (random_value1, current_time))
- break
- pass
- else:
- Logger.info("Values %s and %s were not found in the response." % (random_value1, current_time))
- raise Fail("Values %s and %s were not found in the response." % (random_value1, current_time))
-
- Logger.info("Ambari Metrics service check is finished.")
-
-if __name__ == "__main__":
- AMSServiceCheck().execute()
-
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/service_mapping.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/service_mapping.py
deleted file mode 100755
index 2eeb427bd5..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/service_mapping.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-ams_collector_win_service_name = "AmbariMetricsCollector"
-ams_monitor_win_service_name = "AmbariMetricsHostMonitoring"
-ams_embedded_hbase_win_service_name = "ams_hbase_master" \ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/split_points.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/split_points.py
deleted file mode 100755
index fa4deaf719..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/split_points.py
+++ /dev/null
@@ -1,236 +0,0 @@
-# !/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import sys
-import re
-import math
-import collections
-import ast
-
-metric_filename_ext = '.txt'
-# 5 regions for higher order aggregate tables
-other_region_static_count = 6
-# Max equidistant points to return per service
-max_equidistant_points = 50
-
-b_bytes = 1
-k_bytes = 1 << 10 # 1024
-m_bytes = 1 << 20 # 1024^2
-g_bytes = 1 << 30 # 1024^3
-t_bytes = 1 << 40 # 1024^4
-p_bytes = 1 << 50 # 1024^5
-
-def to_number(s):
- try:
- return int(re.sub("\D", "", s))
- except ValueError:
- return None
-
-def format_Xmx_size_to_bytes(value, default='b'):
- strvalue = str(value).lower()
- if len(strvalue) == 0:
- return 0
- modifier = strvalue[-1]
-
- if modifier == ' ' or modifier in "0123456789":
- modifier = default
-
- m = {
- modifier == 'b': b_bytes,
- modifier == 'k': k_bytes,
- modifier == 'm': m_bytes,
- modifier == 'g': g_bytes,
- modifier == 't': t_bytes,
- modifier == 'p': p_bytes
- } [1]
- return to_number(strvalue) * m
-
-# Class that takes AMS HBase configs as input and determines the Region
-# pre-splits based on selected services also passed as a parameter to the class.
-class FindSplitPointsForAMSRegions():
-
- def __init__(self, ams_hbase_site, ams_hbase_env, serviceMetricsDir,
- operation_mode = 'embedded', services = None):
- self.ams_hbase_site = ams_hbase_site
- self.ams_hbase_env = ams_hbase_env
- self.serviceMetricsDir = serviceMetricsDir
- self.services = services
- self.mode = operation_mode
- # Add host metrics if not present as input
- if self.services and 'HOST' not in self.services:
- self.services.append('HOST')
-
- # Initialize before user
- self.initialize()
-
- def initialize(self):
- # calculate regions based on available memory
- self.initialize_region_counts()
- self.initialize_ordered_set_of_metrics()
-
- def initialize_region_counts(self):
- try:
- xmx_master_bytes = format_Xmx_size_to_bytes(self.ams_hbase_env['hbase_master_heapsize'], 'm')
- xmx_region_bytes = 0
- if "hbase_regionserver_heapsize" in self.ams_hbase_env:
- xmx_region_bytes = format_Xmx_size_to_bytes(self.ams_hbase_env['hbase_regionserver_heapsize'], 'm')
- xmx_bytes = xmx_master_bytes + xmx_region_bytes
- if self.mode == 'distributed':
- xmx_bytes = xmx_region_bytes
-
- memstore_max_mem = float(self.ams_hbase_site['hbase.regionserver.global.memstore.lowerLimit']) * xmx_bytes
- memstore_flush_size = format_Xmx_size_to_bytes(self.ams_hbase_site['hbase.hregion.memstore.flush.size'])
-
- max_inmemory_regions = (memstore_max_mem / memstore_flush_size) - other_region_static_count
- print 'max_inmemory_regions: %s' % max_inmemory_regions
-
- if max_inmemory_regions > 2:
- # Lets say total = 12, so we have 7 regions to allocate between
- # METRIC_RECORD and METRIC_AGGREGATE tables, desired = (5, 2)
- self.desired_precision_region_count = int(math.floor(0.8 * max_inmemory_regions))
- self.desired_aggregate_region_count = int(max_inmemory_regions - self.desired_precision_region_count)
- else:
- self.desired_precision_region_count = 1
- self.desired_aggregate_region_count = 1
-
- except:
- print('Bad config settings, could not calculate max regions available.')
- pass
-
- def initialize_ordered_set_of_metrics(self):
- onlyServicefiles = [ f for f in os.listdir(self.serviceMetricsDir) if
- os.path.isfile(os.path.join(self.serviceMetricsDir, f)) ]
-
- metrics = set()
-
- for file in onlyServicefiles:
- # Process for services selected at deploy time or all services if
- # services arg is not passed
- if self.services is None or file.rstrip(metric_filename_ext) in self.services:
- print 'Processing file: %s' % os.path.join(self.serviceMetricsDir, file)
- service_metrics = set()
- with open(os.path.join(self.serviceMetricsDir, file), 'r') as f:
- for metric in f:
- service_metrics.add(metric.strip())
- pass
- pass
- metrics.update(self.find_equidistant_metrics(list(sorted(service_metrics))))
- pass
- pass
-
- self.metrics = sorted(metrics)
- print 'metrics length: %s' % len(self.metrics)
-
- # Pick 50 metric points for each service that are equidistant from
- # each other for a service
- def find_equidistant_metrics(self, service_metrics):
- equi_metrics = []
- idx = len(service_metrics) / max_equidistant_points
- if idx == 0:
- return service_metrics
- pass
-
- index = idx
- for i in range(0, max_equidistant_points - 1):
- equi_metrics.append(service_metrics[index - 1])
- index += idx
- pass
-
- return equi_metrics
-
- def get_split_points(self):
- split_points = collections.namedtuple('SplitPoints', [ 'precision', 'aggregate' ])
- split_points.precision = []
- split_points.aggregate = []
-
- metric_list = list(self.metrics)
- metrics_total = len(metric_list)
-
- if self.desired_precision_region_count > 1:
- idx = int(math.ceil(metrics_total / self.desired_precision_region_count))
- index = idx
- for i in range(0, self.desired_precision_region_count - 1):
- if index < metrics_total - 1:
- split_points.precision.append(metric_list[index])
- index += idx
-
- if self.desired_aggregate_region_count > 1:
- idx = int(math.ceil(metrics_total / self.desired_aggregate_region_count))
- index = idx
- for i in range(0, self.desired_aggregate_region_count - 1):
- if index < metrics_total - 1:
- split_points.aggregate.append(metric_list[index])
- index += idx
-
- return split_points
- pass
-
-def main(argv = None):
- scriptDir = os.path.realpath(os.path.dirname(argv[0]))
- serviceMetricsDir = os.path.join(scriptDir, os.pardir, 'files', 'service-metrics')
-
- if os.path.exists(serviceMetricsDir):
- onlyargs = argv[1:]
- if len(onlyargs) < 3:
- sys.stderr.write("Usage: dict(ams-hbase-site) dict(ams-hbase-env) list(services)\n")
- sys.exit(2)
- pass
-
- ams_hbase_site = None
- ams_hbase_env = None
- services = None
- try:
- ams_hbase_site = ast.literal_eval(str(onlyargs[0]))
- ams_hbase_env = ast.literal_eval(str(onlyargs[1]))
- services = onlyargs[2]
- if services:
- services = str(services).split(',')
- pass
- except Exception, ex:
- sys.stderr.write(str(ex))
- sys.stderr.write("\nUsage: Expected items not found in input. Found "
- " ams-hbase-site => {0}, ams-hbase-env => {1},"
- " services => {2}".format(ams_hbase_site, ams_hbase_env, services))
- sys.exit(2)
-
- print '--------- AMS Regions Split point finder ---------'
- print 'Services: %s' % services
-
- mode = 'distributed' if 'hbase.rootdir' in ams_hbase_site and \
- 'hdfs' in ams_hbase_site['hbase.rootdir'] else \
- 'embedded'
-
- split_point_finder = FindSplitPointsForAMSRegions(
- ams_hbase_site, ams_hbase_env, serviceMetricsDir, mode, services)
-
- result = split_point_finder.get_split_points()
- print 'Split points for precision table : %s' % len(result.precision)
- print 'precision: %s' % str(result.precision)
- print 'Split points for aggregate table : %s' % len(result.aggregate)
- print 'aggregate: %s' % str(result.aggregate)
-
- return 0
-
- else:
- print 'Cannot find service metrics dir in %s' % scriptDir
-
-if __name__ == '__main__':
- main(sys.argv)
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/status.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/status.py
deleted file mode 100755
index 59466ad40c..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/status.py
+++ /dev/null
@@ -1,46 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-import os
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def check_service_status(name):
- if name=='collector':
- pid_file = format("{ams_collector_pid_dir}/ambari-metrics-collector.pid")
- check_process_status(pid_file)
- pid_file = format("{hbase_pid_dir}/hbase-{hbase_user}-master.pid")
- check_process_status(pid_file)
- if os.path.exists(format("{hbase_pid_dir}/distributed_mode")):
- pid_file = format("{hbase_pid_dir}/hbase-{hbase_user}-regionserver.pid")
- check_process_status(pid_file)
-
- elif name == 'monitor':
- pid_file = format("{ams_monitor_pid_dir}/ambari-metrics-monitor.pid")
- check_process_status(pid_file)
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def check_service_status(name):
- import service_mapping
- if name=='collector':
- check_windows_service_status(service_mapping.ams_collector_win_service_name)
- elif name == 'monitor':
- check_windows_service_status(service_mapping.ams_monitor_win_service_name)
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/status_params.py
deleted file mode 100755
index d446baad83..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/scripts/status_params.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from ambari_commons import OSCheck
-
-if OSCheck.is_windows_family():
- from params_windows import *
-else:
- from params_linux import *
-
-hbase_pid_dir = config['configurations']['ams-hbase-env']['hbase_pid_dir']
-hbase_user = ams_user
-ams_collector_pid_dir = config['configurations']['ams-env']['metrics_collector_pid_dir']
-ams_monitor_pid_dir = config['configurations']['ams-env']['metrics_monitor_pid_dir']
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-ams_hbase_conf_dir = format("{hbase_conf_dir}")
-
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-hostname = config['hostname']
-tmp_dir = Script.get_tmp_dir()
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/ams.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/ams.conf.j2
deleted file mode 100755
index c5fbc9b60c..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/ams.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{ams_user}} - nofile {{max_open_files_limit}}
-{{ams_user}} - nproc 65536
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/ams_collector_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/ams_collector_jaas.conf.j2
deleted file mode 100755
index f7f00ebe06..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/ams_collector_jaas.conf.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{ams_collector_keytab_path}}"
-principal="{{ams_collector_jaas_princ}}";
-};
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/ams_zookeeper_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/ams_zookeeper_jaas.conf.j2
deleted file mode 100755
index 19295489fe..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/ams_zookeeper_jaas.conf.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-Server {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{ams_zookeeper_keytab}}"
-principal="{{ams_zookeeper_principal_name}}";
-};
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hadoop-metrics2-hbase.properties.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hadoop-metrics2-hbase.properties.j2
deleted file mode 100755
index a65ea881e9..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hadoop-metrics2-hbase.properties.j2
+++ /dev/null
@@ -1,63 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-hbase.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-hbase.period=30
-hbase.collector={{metric_collector_host}}:{{metric_collector_port}}
-
-jvm.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-jvm.period=30
-jvm.collector={{metric_collector_host}}:{{metric_collector_port}}
-
-rpc.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-rpc.period=30
-rpc.collector={{metric_collector_host}}:{{metric_collector_port}}
-
-*.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
-*.sink.timeline.slave.host.name={{hostname}}
-hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-hbase.sink.timeline.period={{metrics_collection_period}}
-hbase.sink.timeline.sendInterval={{metrics_report_interval}}000
-hbase.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-hbase.sink.timeline.serviceName-prefix=ams
-
-# Switch off metrics generation on a per region basis
-*.source.filter.class=org.apache.hadoop.metrics2.filter.GlobFilter
-hbase.*.source.filter.exclude=*Regions*
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hbase_client_jaas.conf.j2
deleted file mode 100755
index 38f9721b99..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hbase_client_jaas.conf.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=false
-useTicketCache=true;
-};
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hbase_grant_permissions.j2
deleted file mode 100755
index c29c674596..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hbase_grant_permissions.j2
+++ /dev/null
@@ -1,39 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
-exit
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hbase_master_jaas.conf.j2
deleted file mode 100755
index a93c36c4ed..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hbase_master_jaas.conf.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{master_keytab_path}}"
-principal="{{master_jaas_princ}}";
-};
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hbase_regionserver_jaas.conf.j2
deleted file mode 100755
index 709748164f..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/hbase_regionserver_jaas.conf.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{regionserver_keytab_path}}"
-principal="{{regionserver_jaas_princ}}";
-};
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/metric_groups.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/metric_groups.conf.j2
deleted file mode 100755
index aa03d195c6..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/metric_groups.conf.j2
+++ /dev/null
@@ -1,37 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{
- "host_metric_groups": {
- "all": {
- "collect_every": "10",
- "metrics": [
- {
- "name": "bytes_out",
- "value_threshold": "128"
- }
- ]
- }
- },
- "process_metric_groups": {
- "": {
- "collect_every": "15",
- "metrics": []
- }
- }
-}
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/metric_monitor.ini.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/metric_monitor.ini.j2
deleted file mode 100755
index fc86a58096..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/metric_monitor.ini.j2
+++ /dev/null
@@ -1,31 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-[default]
-debug_level = INFO
-metrics_server = {{metric_collector_host}}:{{metric_collector_port}}
-hostname = {{hostname}}
-enable_time_threshold = false
-enable_value_threshold = false
-
-[emitter]
-send_interval = {{metrics_report_interval}}
-
-[collector]
-collector_sleep_interval = 5
-max_queue_size = 5000
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/regionservers.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/regionservers.j2
deleted file mode 100755
index 81d060b16e..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/regionservers.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in rs_hosts %}{{host}}
-{% endfor %}
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/smoketest_metrics.json.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/smoketest_metrics.json.j2
deleted file mode 100755
index 2ee0efa14d..0000000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/AMBARI_METRICS/package/templates/smoketest_metrics.json.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "metrics": [
- {
- "metricname": "AMBARI_METRICS.SmokeTest.FakeMetric",
- "appid": "amssmoketestfake",
- "hostname": "{{hostname}}",
- "timestamp": {{current_time}},
- "starttime": {{current_time}},
- "metrics": {
- "{{current_time}}": {{random1}},
- "{{current_time + 1000}}": {{current_time}}
- }
- }
- ]
-}
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/widgets.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/widgets.json
index 85082d0af1..d6b1ec4c7e 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/widgets.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/widgets.json
@@ -12,15 +12,15 @@
"is_visible": true,
"metrics": [
{
- "name": "jvm.JvmMetrics.GcCount",
- "metric_path": "metrics/jvm/gcCount",
+ "name": "jvm.JvmMetrics.GcCount._rate",
+ "metric_path": "metrics/jvm/gcCount._rate",
"service_name": "HDFS",
"component_name": "NAMENODE",
"host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
},
{
- "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep",
- "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep",
+ "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate",
+ "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep._rate",
"service_name": "HDFS",
"component_name": "NAMENODE",
"host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
@@ -29,11 +29,11 @@
"values": [
{
"name": "GC total count",
- "value": "${jvm.JvmMetrics.GcCount}"
+ "value": "${jvm.JvmMetrics.GcCount._rate}"
},
{
"name": "GC count of type major collection",
- "value": "${jvm.JvmMetrics.GcCountConcurrentMarkSweep}"
+ "value": "${jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate}"
}
],
"properties": {
@@ -48,8 +48,8 @@
"is_visible": true,
"metrics": [
{
- "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep",
- "metric_path": "metrics/jvm/GcTimeMillisConcurrentMarkSweep",
+ "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate",
+ "metric_path": "metrics/jvm/GcTimeMillisConcurrentMarkSweep._rate",
"service_name": "HDFS",
"component_name": "NAMENODE",
"host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
@@ -58,7 +58,7 @@
"values": [
{
"name": "GC time in major collection",
- "value": "${jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep}"
+ "value": "${jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate}"
}
],
"properties": {
@@ -74,8 +74,16 @@
"is_visible": true,
"metrics": [
{
- "name": "rpc.rpc.NumOpenConnections",
- "metric_path": "metrics/rpc/NumOpenConnections",
+ "name": "rpc.rpc.client.NumOpenConnections",
+ "metric_path": "metrics/rpc/client/NumOpenConnections",
+ "category": "",
+ "service_name": "HDFS",
+ "component_name": "NAMENODE",
+ "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+ },
+ {
+ "name": "rpc.rpc.datanode.NumOpenConnections",
+ "metric_path": "metrics/rpc/datanode/NumOpenConnections",
"category": "",
"service_name": "HDFS",
"component_name": "NAMENODE",
@@ -84,8 +92,12 @@
],
"values": [
{
- "name": "Open Connections",
- "value": "${rpc.rpc.NumOpenConnections}"
+ "name": "Open Client Connections",
+ "value": "${rpc.rpc.client.NumOpenConnections}"
+ },
+ {
+ "name": "Open Datanode Connections",
+ "value": "${rpc.rpc.datanode.NumOpenConnections}"
}
],
"properties": {
@@ -184,13 +196,6 @@
"service_name": "HDFS",
"component_name": "NAMENODE",
"host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
- },
- {
- "name": "mem_cached",
- "metric_path": "metrics/memory/mem_cached",
- "service_name": "HDFS",
- "component_name": "NAMENODE",
- "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
}
],
"values": [
@@ -200,7 +205,7 @@
},
{
"name": "Memory utilization",
- "value": "${((mem_total - mem_free - mem_cached)/mem_total) * 100}"
+ "value": "${((mem_total - mem_free)/mem_total) * 100}"
}
],
"properties": {
@@ -216,15 +221,29 @@
"is_visible": true,
"metrics": [
{
- "name": "rpc.rpc.RpcQueueTimeAvgTime",
- "metric_path": "metrics/rpc/RpcQueueTime_avg_time",
+ "name": "rpc.rpc.client.RpcQueueTimeAvgTime",
+ "metric_path": "metrics/rpc/client/RpcQueueTime_avg_time",
+ "service_name": "HDFS",
+ "component_name": "NAMENODE",
+ "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+ },
+ {
+ "name": "rpc.rpc.client.RpcProcessingTimeAvgTime",
+ "metric_path": "metrics/rpc/client/RpcProcessingTime_avg_time",
+ "service_name": "HDFS",
+ "component_name": "NAMENODE",
+ "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+ },
+ {
+ "name": "rpc.rpc.datanode.RpcQueueTimeAvgTime",
+ "metric_path": "metrics/rpc/datanode/RpcQueueTime_avg_time",
"service_name": "HDFS",
"component_name": "NAMENODE",
"host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
},
{
- "name": "rpc.rpc.RpcProcessingTimeAvgTime",
- "metric_path": "metrics/rpc/RpcProcessingTime_avg_time",
+ "name": "rpc.rpc.datanode.RpcProcessingTimeAvgTime",
+ "metric_path": "metrics/rpc/datanode/RpcProcessingTime_avg_time",
"service_name": "HDFS",
"component_name": "NAMENODE",
"host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
@@ -232,12 +251,20 @@
],
"values": [
{
- "name": "RPC Queue Wait time",
- "value": "${rpc.rpc.RpcQueueTimeAvgTime}"
+ "name": "Client RPC Queue Wait time",
+ "value": "${rpc.rpc.client.RpcQueueTimeAvgTime}"
+ },
+ {
+ "name": "Client RPC Processing time",
+ "value": "${rpc.rpc.client.RpcProcessingTimeAvgTime}"
},
{
- "name": "RPC Processing time",
- "value": "${rpc.rpc.RpcProcessingTimeAvgTime}"
+ "name": "Datanode RPC Queue Wait time",
+ "value": "${rpc.rpc.datanode.RpcQueueTimeAvgTime}"
+ },
+ {
+ "name": "Datanode RPC Processing time",
+ "value": "${rpc.rpc.datanode.RpcProcessingTimeAvgTime}"
}
],
"properties": {
@@ -248,13 +275,13 @@
},
{
"widget_name": "NameNode Operations",
- "description": "Total number of file operation over time.",
+ "description": "Rate per second of file operations over time.",
"widget_type": "GRAPH",
"is_visible": false,
"metrics": [
{
- "name": "dfs.namenode.TotalFileOps",
- "metric_path": "metrics/dfs/namenode/TotalFileOps",
+ "name": "dfs.namenode.TotalFileOps._rate",
+ "metric_path": "metrics/dfs/namenode/TotalFileOps._rate",
"service_name": "HDFS",
"component_name": "NAMENODE",
"host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
@@ -263,7 +290,7 @@
"values": [
{
"name": "NameNode File Operations",
- "value": "${dfs.namenode.TotalFileOps}"
+ "value": "${dfs.namenode.TotalFileOps._rate}"
}
],
"properties": {
@@ -278,8 +305,8 @@
"is_visible": true,
"metrics": [
{
- "name": "dfs.datanode.VolumeFailures._sum",
- "metric_path": "metrics/dfs/datanode/VolumeFailures._sum",
+ "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum",
+ "metric_path": "metrics/dfs/datanode/NumFailedVolumes",
"service_name": "HDFS",
"component_name": "DATANODE"
}
@@ -287,7 +314,7 @@
"values": [
{
"name": "Failed disk volumes",
- "value": "${dfs.datanode.VolumeFailures._sum}"
+ "value": "${FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum}"
}
],
"properties": {
@@ -295,8 +322,8 @@
}
},
{
- "widget_name": "Corrupted Blocks",
- "description": "Number represents data blocks that have become corrupted or missing. Its indicative of HDFS bad health.",
+ "widget_name": "Blocks With Corrupted Replicas",
+ "description": "Number represents data blocks with at least one corrupted replica (but not all of them). It is indicative of bad HDFS health.",
"widget_type": "NUMBER",
"is_visible": true,
"metrics": [
@@ -304,12 +331,13 @@
"name": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
"metric_path": "metrics/dfs/FSNamesystem/CorruptBlocks",
"service_name": "HDFS",
- "component_name": "NAMENODE"
+ "component_name": "NAMENODE",
+ "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
}
],
"values": [
{
- "name": "Corrupted Blocks",
+ "name": "Blocks With Corrupted Replicas",
"value": "${Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks}"
}
],
@@ -328,7 +356,8 @@
"name": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
"metric_path": "metrics/dfs/FSNamesystem/UnderReplicatedBlocks",
"service_name": "HDFS",
- "component_name": "NAMENODE"
+ "component_name": "NAMENODE",
+ "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
}
],
"values": [
@@ -341,36 +370,12 @@
"warning_threshold": "0",
"error_threshold": "50"
}
- },
- {
- "widget_name": "DataNode JVM Heap Memory Used",
- "description": "DataNode JVM Heap Memory Used",
- "widget_type": "NUMBER",
- "is_visible": true,
- "metrics": [
- {
- "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM",
- "metric_path": "metrics/jvm/memHeapUsedM",
- "service_name": "HDFS",
- "component_name": "DATANODE"
- }
- ],
- "values": [
- {
- "name": "DataNode JVM Heap Memory Used",
- "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM}"
- }
- ],
- "properties": {
- "display_unit": "MB",
- "max_limit": "512"
- }
},
{
"widget_name": "HDFS Space Utilization",
"description": "Percentage of available space used in the DFS.",
"widget_type": "GAUGE",
- "is_visible": false,
+ "is_visible": true,
"metrics": [
{
"name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.DfsUsed",
@@ -395,6 +400,30 @@
"warning_threshold": "0.75",
"error_threshold": "0.9"
}
+ },
+ {
+ "widget_name": "DataNode JVM Heap Memory Used",
+ "description": "DataNode JVM Heap Memory Used",
+ "widget_type": "NUMBER",
+ "is_visible": true,
+ "metrics": [
+ {
+ "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM",
+ "metric_path": "metrics/jvm/memHeapUsedM",
+ "service_name": "HDFS",
+ "component_name": "DATANODE"
+ }
+ ],
+ "values": [
+ {
+ "name": "DataNode JVM Heap Memory Used",
+ "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM}"
+ }
+ ],
+ "properties": {
+ "display_unit": "MB",
+ "max_limit": "512"
+ }
}
]
},
@@ -411,8 +440,8 @@
"is_visible": true,
"metrics": [
{
- "name": "dfs.datanode.BytesRead",
- "metric_path": "metrics/dfs/datanode/bytes_read",
+ "name": "dfs.datanode.BytesRead._rate",
+ "metric_path": "metrics/dfs/datanode/bytes_read._rate",
"service_name": "HDFS",
"component_name": "DATANODE"
}
@@ -420,7 +449,7 @@
"values": [
{
"name": "HDFS Bytes Read",
- "value": "${dfs.datanode.BytesRead}"
+ "value": "${dfs.datanode.BytesRead._rate}"
}
],
"properties": {
@@ -435,8 +464,8 @@
"is_visible": false,
"metrics": [
{
- "name": "dfs.datanode.BytesWritten",
- "metric_path": "metrics/dfs/datanode/bytes_written",
+ "name": "dfs.datanode.BytesWritten._rate",
+ "metric_path": "metrics/dfs/datanode/bytes_written._rate",
"service_name": "HDFS",
"component_name": "DATANODE"
}
@@ -444,7 +473,7 @@
"values": [
{
"name": "HDFS Bytes Written",
- "value": "${dfs.datanode.BytesWritten}"
+ "value": "${dfs.datanode.BytesWritten._rate}"
}
],
"properties": {
@@ -532,26 +561,26 @@
"is_visible": false,
"metrics": [
{
- "name": "dfs.datanode.BytesRead",
- "metric_path": "metrics/dfs/datanode/bytes_read",
+ "name": "dfs.datanode.BytesRead._rate",
+ "metric_path": "metrics/dfs/datanode/bytes_read._rate",
"service_name": "HDFS",
"component_name": "DATANODE"
},
{
- "name": "dfs.datanode.BytesWritten",
- "metric_path": "metrics/dfs/datanode/bytes_written",
+ "name": "dfs.datanode.BytesWritten._rate",
+ "metric_path": "metrics/dfs/datanode/bytes_written._rate",
"service_name": "HDFS",
"component_name": "DATANODE"
},
{
- "name": "dfs.datanode.TotalReadTime",
- "metric_path": "metrics/dfs/datanode/total_read_time",
+ "name": "dfs.datanode.TotalReadTime._rate",
+ "metric_path": "metrics/dfs/datanode/TotalReadTime._rate",
"service_name": "HDFS",
"component_name": "DATANODE"
},
{
- "name": "dfs.datanode.TotalWriteTime",
- "metric_path": "metrics/dfs/datanode/total_write_time",
+ "name": "dfs.datanode.TotalWriteTime._rate",
+ "metric_path": "metrics/dfs/datanode/TotalWriteTime._rate",
"service_name": "HDFS",
"component_name": "DATANODE"
}
@@ -559,7 +588,7 @@
"values": [
{
"name": "DataNode Process Disk I/O Utilization",
- "value": "${(((dfs.datanode.BytesRead/dfs.datanode.TotalReadTime)+(dfs.datanode.BytesWritten/dfs.datanode.TotalWriteTime))*50}"
+ "value": "${((dfs.datanode.BytesRead._rate/dfs.datanode.TotalReadTime._rate)+(dfs.datanode.BytesWritten._rate/dfs.datanode.TotalWriteTime._rate))*50}"
}
],
"properties": {
@@ -574,26 +603,26 @@
"is_visible": false,
"metrics": [
{
- "name": "dfs.datanode.RemoteBytesRead",
- "metric_path": "metrics/dfs/datanode/remote_bytes_read",
+ "name": "dfs.datanode.RemoteBytesRead._rate",
+ "metric_path": "metrics/dfs/datanode/RemoteBytesRead._rate",
"service_name": "HDFS",
"component_name": "DATANODE"
},
{
- "name": "dfs.datanode.ReadsFromRemoteClient",
- "metric_path": "metrics/dfs/datanode/reads_from_remote_client",
+ "name": "dfs.datanode.ReadsFromRemoteClient._rate",
+ "metric_path": "metrics/dfs/datanode/reads_from_remote_client._rate",
"service_name": "HDFS",
"component_name": "DATANODE"
},
{
- "name": "dfs.datanode.RemoteBytesWritten",
- "metric_path": "metrics/dfs/datanode/remote_bytes_written",
+ "name": "dfs.datanode.RemoteBytesWritten._rate",
+ "metric_path": "metrics/dfs/datanode/RemoteBytesWritten._rate",
"service_name": "HDFS",
"component_name": "DATANODE"
},
{
- "name": "dfs.datanode.WritesFromRemoteClient",
- "metric_path": "metrics/dfs/datanode/writes_from_remote_client",
+ "name": "dfs.datanode.WritesFromRemoteClient._rate",
+ "metric_path": "metrics/dfs/datanode/writes_from_remote_client._rate",
"service_name": "HDFS",
"component_name": "DATANODE"
}
@@ -601,7 +630,7 @@
"values": [
{
"name": "DataNode Process Network I/O Utilization",
- "value": "${((dfs.datanode.RemoteBytesRead/dfs.datanode.ReadsFromRemoteClient)+(dfs.datanode.RemoteBytesWritten/dfs.datanode.WritesFromRemoteClient))*50}"
+ "value": "${((dfs.datanode.RemoteBytesRead._rate/dfs.datanode.ReadsFromRemoteClient._rate)+(dfs.datanode.RemoteBytesWritten._rate/dfs.datanode.WritesFromRemoteClient._rate))*50}"
}
],
"properties": {
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index c2714e21ea..bd86b50b5d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -331,11 +331,17 @@ if dfs_ha_enabled:
namenode_rpc = nn_host
pass
pass
+elif 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
+ namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', None)
else:
- namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', None)
+ namenode_rpc = default('/configurations/core-site/fs.defaultFS', None)
if namenode_rpc:
- nn_rpc_client_port = namenode_rpc.split(':')[1].strip()
+ port_str = namenode_rpc.split(':')[-1].strip()
+ try:
+ nn_rpc_client_port = int(port_str)
+ except ValueError:
+ nn_rpc_client_port = None
if dfs_ha_enabled:
dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)