1 Answer
Example Script to Gather Metrics
import boto3
from datetime import datetime, timedelta

# Initialize the CloudWatch client
cloudwatch = boto3.client('cloudwatch')

# Define the time period: the last 7 days
end_time = datetime.utcnow()
start_time = end_time - timedelta(days=7)

def get_metric_statistics(metric_name, namespace, period, start_time, end_time, statistics, dimensions):
    response = cloudwatch.get_metric_statistics(
        Namespace=namespace,
        MetricName=metric_name,
        StartTime=start_time,
        EndTime=end_time,
        Period=period,
        Statistics=statistics,
        Dimensions=dimensions
    )
    return response['Datapoints']

# Get average cache size in GB
bytes_used_for_cache = get_metric_statistics(
    metric_name='BytesUsedForCache',
    namespace='AWS/ElastiCache',
    period=3600,
    start_time=start_time,
    end_time=end_time,
    statistics=['Average'],
    dimensions=[{'Name': 'CacheClusterId', 'Value': 'your-cluster-id'}]
)
average_cache_size_gb = sum(datapoint['Average'] for datapoint in bytes_used_for_cache) / len(bytes_used_for_cache) / (1024 ** 3)

# Get average request rate (requests per second over the 7-day window)
cmd_get = get_metric_statistics(
    metric_name='CmdGet',
    namespace='AWS/ElastiCache',
    period=3600,
    start_time=start_time,
    end_time=end_time,
    statistics=['Sum'],
    dimensions=[{'Name': 'CacheClusterId', 'Value': 'your-cluster-id'}]
)
cmd_set = get_metric_statistics(
    metric_name='CmdSet',
    namespace='AWS/ElastiCache',
    period=3600,
    start_time=start_time,
    end_time=end_time,
    statistics=['Sum'],
    dimensions=[{'Name': 'CacheClusterId', 'Value': 'your-cluster-id'}]
)
average_request_rate = (sum(datapoint['Sum'] for datapoint in cmd_get) + sum(datapoint['Sum'] for datapoint in cmd_set)) / (7 * 24 * 3600)

# Get average data transferred per request in KB
network_bytes_in = get_metric_statistics(
    metric_name='NetworkBytesIn',
    namespace='AWS/ElastiCache',
    period=3600,
    start_time=start_time,
    end_time=end_time,
    statistics=['Sum'],
    dimensions=[{'Name': 'CacheClusterId', 'Value': 'your-cluster-id'}]
)
network_bytes_out = get_metric_statistics(
    metric_name='NetworkBytesOut',
    namespace='AWS/ElastiCache',
    period=3600,
    start_time=start_time,
    end_time=end_time,
    statistics=['Sum'],
    dimensions=[{'Name': 'CacheClusterId', 'Value': 'your-cluster-id'}]
)
total_network_bytes = sum(datapoint['Sum'] for datapoint in network_bytes_in) + sum(datapoint['Sum'] for datapoint in network_bytes_out)
total_requests = sum(datapoint['Sum'] for datapoint in cmd_get) + sum(datapoint['Sum'] for datapoint in cmd_set)
average_data_per_request_kb = (total_network_bytes / total_requests) / 1024

print(f"Average Cache Size (GB): {average_cache_size_gb}")
print(f"Average Request Rate (requests/second): {average_request_rate}")
print(f"Average Data Transferred per Request (KB): {average_data_per_request_kb}")
Is it that we don't need to consider the DatabaseMemoryUsagePercentage metric for the Average Cache Data Size (in GB), or the CurrConnections, NewConnections, Evictions, GetTypeCmds, SetTypeCmds, KeySpaceHits, and KeySpaceMisses metrics for the Average Simple Request Rate (per second)?
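For reference, those metrics can be pulled with the same helper. Below is a minimal sketch, assuming the get_metric_statistics function and the start_time/end_time window from the script above, a Redis cluster (GetTypeCmds and SetTypeCmds are the Redis counterparts of the Memcached CmdGet and CmdSet), and 'your-cluster-id' as a placeholder CacheClusterId.

# Minimal sketch; assumes get_metric_statistics, start_time, and end_time
# from the script above, a Redis cluster, and a placeholder cluster id.
dimensions = [{'Name': 'CacheClusterId', 'Value': 'your-cluster-id'}]

# Memory used as a percentage of the engine's maximum (Redis only); this is
# a ratio, whereas BytesUsedForCache gives the absolute cache size.
memory_pct = get_metric_statistics(
    metric_name='DatabaseMemoryUsagePercentage',
    namespace='AWS/ElastiCache',
    period=3600,
    start_time=start_time,
    end_time=end_time,
    statistics=['Average'],
    dimensions=dimensions
)

# Redis read/write command counters, the counterparts of CmdGet/CmdSet
get_cmds = get_metric_statistics(
    metric_name='GetTypeCmds',
    namespace='AWS/ElastiCache',
    period=3600,
    start_time=start_time,
    end_time=end_time,
    statistics=['Sum'],
    dimensions=dimensions
)
set_cmds = get_metric_statistics(
    metric_name='SetTypeCmds',
    namespace='AWS/ElastiCache',
    period=3600,
    start_time=start_time,
    end_time=end_time,
    statistics=['Sum'],
    dimensions=dimensions
)

# Same request-rate calculation as in the script above, using the Redis counters
redis_request_rate = (sum(d['Sum'] for d in get_cmds) +
                      sum(d['Sum'] for d in set_cmds)) / (7 * 24 * 3600)
print(f"Average Memory Usage (%): {sum(d['Average'] for d in memory_pct) / len(memory_pct)}")
print(f"Average Redis Request Rate (requests/second): {redis_request_rate}")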