```diff
 if_mib:
+  auth:
+    community: public
   walk:
   - 1.3.6.1.2.1.2
   - 1.3.6.1.2.1.31.1.1
+  - 1.3.6.1.4.1.2636.3.39.1.12
+  - 1.3.6.1.4.1.2636.3.1
   get:
   - 1.3.6.1.2.1.1.3.0
   metrics:
+  - name: jnxAvailableSession
+    oid: 1.3.6.1.4.1.2636.3.39.1.12.1.1.1.7
+    type: gauge
+    help: Juniper_SRX_MonitoringMaxFlowSession
+  - name: FlowSessionsCount
+    oid: 1.3.6.1.4.1.2636.3.39.1.12.1.1.1.6.0
+    type: gauge
+    help: Current number of Flow sessions
+  - name: jnxJsSPUMonitoringMemoryUsage
+    oid: 1.3.6.1.4.1.2636.3.39.1.12.1.1.1.5.0
+    type: gauge
+    help: Current memory usage of the SPU (CPU) in percentage.
+  - name: jnxJsSPUMonitoringCPUUsage
+    oid: 1.3.6.1.4.1.2636.3.1.13.1.8.9.1.0.0
+    type: gauge
+    help: Current Services Processing Unit (SPU/CPU) utilization in percentage.
+  - name: jnxOperatingTemp
+    oid: 1.3.6.1.4.1.2636.3.1.13.1.7.9.1.0
+    type: gauge
+    help: The temperature in Celsius (degrees C) of this subject. Zero if unavailable or inapplicable.
+  - name: jnxOperatingMemoryRE1
+    oid: 1.3.6.1.4.1.2636.3.1.13.1.15.9.2.0.0
+    type: gauge
+    help: The installed memory size in Megabytes of this subject. Zero if unavailable or inapplicable.
+  - name: jnxOperatingMemoryRE0
+    oid: 1.3.6.1.4.1.2636.3.1.13.1.15.9.1.0.0
+    type: gauge
+    help: The installed memory size in Megabytes of this subject. Zero if unavailable or inapplicable.
   - name: sysUpTime
     oid: 1.3.6.1.2.1.1.3
     type: gauge
     help: The time (in hundredths of a second) since the network management portion of the system was last re-initialized.
-  - 1.3.6.1.2.1.1.3
```
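The `+` lines extend snmp_exporter's stock `if_mib` module with the Juniper SRX flow-session, CPU, memory, and temperature OIDs. The module can be exercised directly before Prometheus is involved; a minimal check, assuming snmp_exporter is running on its default port 9116 and the firewall answers SNMP at 192.168.1.1 (both are placeholders for your environment):

```sh
# Ask the exporter to poll the device with the if_mib module and print the resulting metrics.
curl 'http://localhost:9116/snmp?module=if_mib&target=192.168.1.1'
```

If `jnxAvailableSession`, `jnxOperatingTemp`, and the other new metrics show up in the output, the module works. The Prometheus side is then a largely stock `prometheus.yml`: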
```yaml
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: "prometheus"

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
```
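The fragment above only covers Prometheus's self-scrape. A hedged sketch of the extra job that would pull the SRX metrics through snmp_exporter, added as another entry under the existing `scrape_configs:` list; the device address and exporter address are placeholders, and the relabelling follows the pattern documented in the snmp_exporter README:

```yaml
  - job_name: "snmp"
    metrics_path: /snmp
    params:
      module: [if_mib]               # the module extended above
    static_configs:
      - targets:
          - 192.168.1.1              # placeholder: the SRX management IP
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target # pass the device address as the ?target= parameter
      - source_labels: [__param_target]
        target_label: instance       # label the series with the device, not the exporter
      - target_label: __address__
        replacement: 127.0.0.1:9116  # placeholder: where snmp_exporter actually listens
```

The relabelling is the key design point: Prometheus lists the SNMP devices as targets, but the real HTTP scrape always goes to the exporter, which in turn polls the device named in the `target` parameter.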