Python General

To see what modules have been imported, use:

import sys
modulenames = set(sys.modules) & set(globals())
allmodules = [sys.modules[name] for name in modulenames]
allmodules

To see if a particular module (e.g. fastai) is imported:

[i for i, x in enumerate(allmodules) if "'fastai'" in str(x)] != []
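If you only need to know whether a module has been imported anywhere in the process (not necessarily bound in the current namespace), a simpler check against sys.modules also works; this is a minimal alternative sketch, not part of the snippet above:

import sys

# sys.modules is keyed by fully qualified module name,
# so membership tells you whether the module has been imported anywhere
"fastai" in sys.modules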
To see memory stats (CPU/GPU/top variables):
import sys
import psutil
import torch

def print_cpu_memory(verbose=True):
    # Get the current memory usage
    memory_info = psutil.virtual_memory()

    # Extract the memory information
    total_memory = memory_info.total
    available_memory = memory_info.available
    used_memory = memory_info.used
    percent_memory = memory_info.percent

    # Convert bytes to megabytes
    total_memory_mb = total_memory / 1024**2
    available_memory_mb = available_memory / 1024**2
    used_memory_mb = used_memory / 1024**2

    if verbose:
        # Print the memory information
        print(f"Total CPU memory: {total_memory_mb:.2f} MB")
        print(f"Available CPU memory: {available_memory_mb:.2f} MB")
        print(f"Used CPU memory: {used_memory_mb:.2f} MB")
        print(f"Percentage of used CPU memory: {percent_memory}%")
    else:
        print(f"Percentage of used CPU memory: {percent_memory}%")


def print_gpu_memory():
    # Check if CUDA is available
    if torch.cuda.is_available():
        # Get the default CUDA device
        device = torch.cuda.current_device()

        # Get the total memory and currently allocated memory on the device
        total_memory = torch.cuda.get_device_properties(device).total_memory
        allocated_memory = torch.cuda.memory_allocated(device)

        # Convert bytes to megabytes
        total_memory_mb = total_memory / 1024**2
        allocated_memory_mb = allocated_memory / 1024**2

        # Print the memory information
        print(f"Total GPU memory: {total_memory_mb:.2f} MB")
        print(f"Allocated GPU memory: {allocated_memory_mb:.2f} MB")
    else:
        print("CUDA is not available")


def print_top_memory_variables(local_vars, var_number_to_print=5):
    """Prints the top variables in terms of memory.

    Usage: `print_top_memory_variables(locals().copy())`; locals() can't be called inside the function itself.

    Args:
        local_vars (dict): pass `locals().copy()`
        var_number_to_print (int): how many of the largest variables to print
    """
    # Measure the size of each local variable
    memory = {}
    for var_name, var_value in local_vars.items():
        var_size = sys.getsizeof(var_value)
        memory[var_name] = var_size

    # Sort by size (largest first) and keep the top entries
    memory_sorted = sorted(memory.items(), key=lambda x: x[1], reverse=True)[:var_number_to_print]

    for var_name, var_size in memory_sorted:
        print(f"Variable: {var_name}, Size: {var_size} bytes")
Profiling

Profiling code is useful in many ways. Python has a built-in module called cProfile. One can also visualize the profile file using snakeviz. Here is an example:
import cProfile
import pstats
import time

def sum(a, b):
    return a + b

def print_many():
    for i in range(100000):
        print(i)

def main():
    print(sum(1, 2))
    print_many()
    just_wait()

def just_wait():
    time.sleep(2)

if __name__ == "__main__":
    with cProfile.Profile() as pr:
        main()

    results = pstats.Stats(pr)
    results.sort_stats(pstats.SortKey.TIME)
    results.print_stats()
    results.dump_stats("speed_test.prof")  # run `snakeviz speed_test.prof` to see the results in a browser (pip install snakeviz if needed)
This gives the following output:
100008 function calls in 2.240 seconds
Ordered by: internal time
ncalls tottime percall cumtime percall filename:lineno(function)
1 2.005 2.005 2.005 2.005 {built-in method time.sleep}
100001 0.220 0.000 0.220 0.000 {built-in method builtins.print}
1 0.015 0.015 0.235 0.235 /Users/nenadbozinovic/Documents/chatbot/speed_test.py:10(print_many)
1 0.000 0.000 0.000 0.000 /Users/nenadbozinovic/miniconda3/envs/blog/lib/python3.11/cProfile.py:118(__exit__)
1 0.000 0.000 2.240 2.240 /Users/nenadbozinovic/Documents/chatbot/speed_test.py:15(main)
1 0.000 0.000 2.005 2.005 /Users/nenadbozinovic/Documents/chatbot/speed_test.py:21(just_wait)
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
1 0.000 0.000 0.000 0.000 /Users/nenadbozinovic/Documents/chatbot/speed_test.py:6(sum)
(There is also an extension for this that I haven't tried yet.)
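The dumped profile can also be reloaded later without re-running the program; this is a small sketch using the standard pstats API (the file name matches the dump_stats call above):

import pstats

# Load the previously dumped profile and show the 10 most expensive calls
stats = pstats.Stats("speed_test.prof")
stats.sort_stats(pstats.SortKey.TIME)
stats.print_stats(10)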