Initial commit with BSL
@@ -0,0 +1,43 @@
#
# OtterTune - create_metric_settings.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
import json
import shutil


def main():
    final_metrics = []
    with open('oracle.txt', 'r') as f:
        odd = 0
        entry = {}
        fields = {}
        lines = f.readlines()
        for line in lines:
            line = line.strip().replace("\n", "")
            if not line:
                continue
            if line == 'NAME' or line.startswith('-'):
                continue
            if odd == 0:
                entry = {}
                entry['model'] = 'website.MetricCatalog'
                fields = {}
                fields['name'] = "global." + line
                fields['summary'] = line
                fields['vartype'] = 2  # int
                fields['scope'] = 'global'
                fields['metric_type'] = 3  # stat
                if fields['name'] == "global.user commits":
                    fields['metric_type'] = 1  # counter
                fields['dbms'] = 18  # oracle
                entry['fields'] = fields
                final_metrics.append(entry)
    with open('oracle_metrics.json', 'w') as f:
        json.dump(final_metrics, f, indent=4)
    shutil.copy('oracle_metrics.json', '../../../../website/fixtures/oracle_metrics.json')


if __name__ == '__main__':
    main()
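Judging from the parsing logic above, oracle.txt is expected to hold the NAME column of an Oracle statistics listing: the script skips blank lines, the literal NAME header, and dashed separator rows, and turns every remaining line into a global metric entry. A minimal sketch, assuming a hypothetical two-metric input (user commits is the one name tagged as a counter; all others default to stat):

NAME
----------------------------------------
user commits
user rollbacks

oracle_metrics.json would then begin:

[
    {
        "model": "website.MetricCatalog",
        "fields": {
            "name": "global.user commits",
            "summary": "user commits",
            "vartype": 2,
            "scope": "global",
            "metric_type": 1,
            "dbms": 18
        }
    },
    ...
]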
File diff suppressed because it is too large
File diff suppressed because it is too large
server/website/script/fixture_generators/metric_settings/postgres_9.6/.gitignore (vendored, new file, 1 line)
@@ -0,0 +1 @@
postgres-96_numeric_metric_names.json
@@ -0,0 +1,120 @@
#
# OtterTune - create_metric_settings.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
import json
import shutil

COUNTER = 1
INFO = 2

INTEGER = 2
STRING = 1
TIMESTAMP = 6
NUMERIC_TYPES = ['oid', 'bigint', 'double precision', 'integer']


def load_data(filename):
    with open(filename, 'r') as f:
        csv_stats = f.readlines()
    header = csv_stats[0].strip().split(',')
    stats_dict = {}
    for line in csv_stats[1:]:
        parts = line.strip().split(',', 3)
        assert len(parts) == 4, "parts: {}".format(parts)
        stat = {}
        stat['name'] = parts[header.index('column_name')]
        stat['summary'] = parts[header.index('description')]
        stat['metric_type'] = parts[header.index('metric_type')]
        vartype = parts[header.index('data_type')]
        if vartype in NUMERIC_TYPES:
            vartype = INTEGER
        elif vartype == 'name' or vartype == 'text':
            vartype = STRING
        elif vartype.startswith('timestamp'):
            vartype = TIMESTAMP
        else:
            raise Exception(vartype)
        stat['vartype'] = vartype
        stats_dict[stat['name']] = stat
    return stats_dict


def main():
    dbstats = load_data('pg96_database_stats.csv')
    gstats = load_data('pg96_global_stats.csv')
    istats = load_data('pg96_index_stats.csv')
    tstats = load_data('pg96_table_stats.csv')

    with open('metrics_sample.json', 'r') as f:
        metrics = json.load(f)

    final_metrics = []
    numeric_metric_names = []
    vartypes = set()
    for view_name, mets in sorted(metrics.items()):
        if 'database' in view_name:
            scope = 'database'
            stats = dbstats
        elif 'indexes' in view_name:
            scope = 'index'
            stats = istats
        elif 'tables' in view_name:
            scope = 'table'
            stats = tstats
        else:
            scope = 'global'
            stats = gstats

        for metric_name in mets:
            entry = {}
            entry['model'] = 'website.MetricCatalog'
            mstats = stats[metric_name]
            fields = {}
            fields['name'] = '{}.{}'.format(view_name, metric_name)
            fields['vartype'] = mstats['vartype']
            vartypes.add(fields['vartype'])
            fields['summary'] = mstats['summary']
            fields['scope'] = scope
            metric_type = mstats['metric_type']
            if metric_type == 'counter':
                numeric_metric_names.append(fields['name'])
                mt = COUNTER
            elif metric_type == 'info':
                mt = INFO
            else:
                raise Exception('Invalid metric type: {}'.format(metric_type))
            fields['metric_type'] = mt
            fields['dbms'] = 1
            entry['fields'] = fields
            final_metrics.append(entry)
            # sorted_metric_names.append(fields['name'])

    with open('postgres-96_metrics.json', 'w') as f:
        json.dump(final_metrics, f, indent=4)

    shutil.copy('postgres-96_metrics.json', '../../../../website/fixtures/postgres-96_metrics.json')

    with open('postgres-96_numeric_metric_names.json', 'w') as f:
        json.dump(numeric_metric_names, f, indent=4)

    # sorted_metrics = [{
    #     'model': 'website.PipelineResult',
    #     'fields': {
    #         "dbms": 1,
    #         "task_type": 2,
    #         "component": 4,
    #         "hardware": 17,
    #         "version_id": 0,
    #         "value": json.dumps(sorted_metric_names),
    #     }
    # }]
    # fname = 'postgres-96_sorted_metric_labels.json'
    # with open(fname, 'w') as f:
    #     json.dump(sorted_metrics, f, indent=4)
    # shutil.copy(fname, '../../../preload/')


if __name__ == '__main__':
    main()
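As a concrete check of load_data, the pg96_database_stats.csv row below would parse into the following dictionary. Note that each line is split with split(',', 3) rather than a real CSV parser, so quoted descriptions (those containing commas) keep their surrounding double quotes in summary:

blks_read,bigint,counter,Number of disk blocks read in this database

{'name': 'blks_read',
 'summary': 'Number of disk blocks read in this database',
 'metric_type': 'counter',
 'vartype': 2}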
@@ -0,0 +1 @@
{"pg_stat_database_conflicts": ["datname", "confl_deadlock", "confl_bufferpin", "datid", "confl_tablespace", "confl_lock", "confl_snapshot"], "pg_stat_user_indexes": ["indexrelid", "relid", "indexrelname", "relname", "idx_tup_fetch", "idx_tup_read", "idx_scan", "schemaname"], "pg_stat_archiver": ["failed_count", "archived_count", "stats_reset", "last_archived_time", "last_failed_time", "last_failed_wal", "last_archived_wal"], "pg_stat_database": ["numbackends", "datname", "blks_read", "deadlocks", "tup_fetched", "tup_updated", "stats_reset", "tup_inserted", "datid", "xact_commit", "tup_deleted", "blk_read_time", "xact_rollback", "conflicts", "blks_hit", "tup_returned", "temp_files", "blk_write_time", "temp_bytes"], "pg_stat_user_tables": ["last_vacuum", "n_tup_ins", "n_dead_tup", "last_analyze", "idx_tup_fetch", "n_tup_upd", "schemaname", "seq_tup_read", "vacuum_count", "n_mod_since_analyze", "n_tup_del", "last_autovacuum", "seq_scan", "relid", "n_tup_hot_upd", "autoanalyze_count", "n_live_tup", "relname", "last_autoanalyze", "idx_scan", "autovacuum_count", "analyze_count"], "pg_stat_bgwriter": ["buffers_backend", "checkpoints_timed", "buffers_alloc", "buffers_clean", "buffers_backend_fsync", "checkpoint_sync_time", "checkpoints_req", "checkpoint_write_time", "maxwritten_clean", "buffers_checkpoint", "stats_reset"], "pg_statio_user_indexes": ["indexrelid", "relid", "indexrelname", "idx_blks_hit", "relname", "idx_blks_read", "schemaname"], "pg_statio_user_tables": ["relid", "heap_blks_hit", "tidx_blks_read", "tidx_blks_hit", "toast_blks_hit", "idx_blks_hit", "relname", "toast_blks_read", "idx_blks_read", "schemaname", "heap_blks_read"]}
@@ -0,0 +1,25 @@
column_name,data_type,metric_type,description
blk_read_time,double precision,counter,"Time spent reading data file blocks by backends in this database, in milliseconds"
blks_hit,bigint,counter,"Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)"
blks_read,bigint,counter,Number of disk blocks read in this database
blk_write_time,double precision,counter,"Time spent writing data file blocks by backends in this database, in milliseconds"
conflicts,bigint,counter,"Number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see pg_stat_database_conflicts for details.)"
datid,oid,info,OID of a database
datname,name,info,Name of this database
deadlocks,bigint,counter,Number of deadlocks detected in this database
numbackends,integer,info,Number of backends currently connected to this database. This is the only column in this view that returns a value reflecting current state; all other columns return the accumulated values since the last reset.
stats_reset,timestamp with time zone,info,Time at which these statistics were last reset
temp_bytes,bigint,counter,"Total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting."
temp_files,bigint,counter,"Number of temporary files created by queries in this database. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the log_temp_files setting."
tup_deleted,bigint,counter,Number of rows deleted by queries in this database
tup_fetched,bigint,counter,Number of rows fetched by queries in this database
tup_inserted,bigint,counter,Number of rows inserted by queries in this database
tup_returned,bigint,counter,Number of rows returned by queries in this database
tup_updated,bigint,counter,Number of rows updated by queries in this database
xact_commit,bigint,counter,Number of transactions in this database that have been committed
xact_rollback,bigint,counter,Number of transactions in this database that have been rolled back
confl_tablespace,bigint,counter,Number of queries in this database that have been canceled due to dropped tablespaces
confl_lock,bigint,counter,Number of queries in this database that have been canceled due to lock timeouts
confl_snapshot,bigint,counter,Number of queries in this database that have been canceled due to old snapshots
confl_bufferpin,bigint,counter,Number of queries in this database that have been canceled due to pinned buffers
confl_deadlock,bigint,counter,Number of queries in this database that have been canceled due to deadlocks
@@ -0,0 +1,19 @@
column_name,data_type,metric_type,description
buffers_alloc,bigint,counter,Number of buffers allocated
buffers_backend,bigint,counter,Number of buffers written directly by a backend
buffers_backend_fsync,bigint,counter,Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)
buffers_checkpoint,bigint,counter,Number of buffers written during checkpoints
buffers_clean,bigint,counter,Number of buffers written by the background writer
checkpoints_req,bigint,counter,Number of requested checkpoints that have been performed
checkpoints_timed,bigint,counter,Number of scheduled checkpoints that have been performed
checkpoint_sync_time,double precision,counter,"Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds"
checkpoint_write_time,double precision,counter,"Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds"
maxwritten_clean,bigint,counter,Number of times the background writer stopped a cleaning scan because it had written too many buffers
stats_reset,timestamp with time zone,info,Time at which these statistics were last reset
archived_count,bigint,counter,Number of WAL files that have been successfully archived
last_archived_wal,text,info,Name of the last WAL file successfully archived
last_archived_time,timestamp with time zone,info,Time of the last successful archive operation
failed_count,bigint,counter,Number of failed attempts for archiving WAL files
last_failed_wal,text,info,Name of the WAL file of the last failed archival operation
last_failed_time,timestamp with time zone,info,Time of the last failed archival operation
stats_reset,timestamp with time zone,info,Time at which these statistics were last reset
@@ -0,0 +1,11 @@
column_name,data_type,metric_type,description
idx_blks_hit,bigint,counter,Number of buffer hits in this index
idx_blks_read,bigint,counter,Number of disk blocks read from this index
idx_scan,bigint,counter,Number of index scans initiated on this index
idx_tup_fetch,bigint,counter,Number of live table rows fetched by simple index scans using this index
idx_tup_read,bigint,counter,Number of index entries returned by scans on this index
indexrelid,oid,info,OID of this index
indexrelname,name,info,Name of this index
relid,oid,info,OID of the table for this index
relname,name,info,Name of the table for this index
schemaname,name,info,Name of the schema this index is in
@@ -0,0 +1,31 @@
column_name,data_type,metric_type,description
analyze_count,bigint,counter,Number of times this table has been manually analyzed
autoanalyze_count,bigint,counter,Number of times this table has been analyzed by the autovacuum daemon
autovacuum_count,bigint,counter,Number of times this table has been vacuumed by the autovacuum daemon
heap_blks_hit,bigint,counter,Number of buffer hits in this table
heap_blks_read,bigint,counter,Number of disk blocks read from this table
idx_blks_hit,bigint,counter,Number of buffer hits in all indexes on this table
idx_blks_read,bigint,counter,Number of disk blocks read from all indexes on this table
idx_scan,bigint,counter,Number of index scans initiated on this table
idx_tup_fetch,bigint,counter,Number of live rows fetched by index scans
last_analyze,timestamp with time zone,info,Last time at which this table was manually analyzed
last_autoanalyze,timestamp with time zone,info,Last time at which this table was analyzed by the autovacuum daemon
last_autovacuum,timestamp with time zone,info,Last time at which this table was vacuumed by the autovacuum daemon
last_vacuum,timestamp with time zone,info,Last time at which this table was manually vacuumed (not counting VACUUM FULL)
n_dead_tup,bigint,counter,Estimated number of dead rows
n_live_tup,bigint,counter,Estimated number of live rows
n_tup_del,bigint,counter,Number of rows deleted
n_tup_hot_upd,bigint,counter,"Number of rows HOT updated (i.e., with no separate index update required)"
n_tup_ins,bigint,counter,Number of rows inserted
n_tup_upd,bigint,counter,Number of rows updated
n_mod_since_analyze,bigint,counter,Estimated number of rows modified since this table was last analyzed
relid,oid,info,OID of a table
relname,name,info,Name of this table
schemaname,name,info,Name of the schema that this table is in
seq_scan,bigint,counter,Number of sequential scans initiated on this table
seq_tup_read,bigint,counter,Number of live rows fetched by sequential scans
tidx_blks_hit,bigint,counter,Number of buffer hits in this table's TOAST table index (if any)
tidx_blks_read,bigint,counter,Number of disk blocks read from this table's TOAST table index (if any)
toast_blks_hit,bigint,counter,Number of buffer hits in this table's TOAST table (if any)
toast_blks_read,bigint,counter,Number of disk blocks read from this table's TOAST table (if any)
vacuum_count,bigint,counter,Number of times this table has been manually vacuumed (not counting VACUUM FULL)
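Since website.MetricCatalog is a Django model and both scripts copy their output into website/fixtures/, the generated catalogs can presumably be loaded with Django's standard fixture command, e.g. python manage.py loaddata postgres-96_metrics.json (the exact deployment workflow is an assumption, not shown in this commit).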
File diff suppressed because it is too large