-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathutil.py
49 lines (41 loc) · 1.62 KB
/
util.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import os
import numpy as np
import random
import torch
import psutil
def seed_everything(seed=1226):
    """Seed every common RNG source (random, numpy, torch CPU/CUDA) for reproducibility.

    :param seed: integer seed applied to all RNGs; defaults to 1226.
    """
    random.seed(seed)
    # Pin hash randomization so hash-based ordering is stable across runs.
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)       # safe no-op when CUDA is unavailable
    torch.cuda.manual_seed_all(seed)   # seed every visible GPU
    # some cudnn methods can be random even after fixing the seed
    # unless you tell it to be deterministic
    torch.backends.cudnn.deterministic = True
    # benchmark mode lets cuDNN pick algorithms non-deterministically per run;
    # it must be disabled as well or determinism above is not guaranteed
    torch.backends.cudnn.benchmark = False
def get_gpu_mem_info(gpu_id=0):
    """Return GPU memory usage for one device, in MB.

    :param gpu_id: index of the GPU to query.
    :return: tuple ``(total, used, free)`` in MB rounded to 2 decimals;
             ``(0, 0, 0)`` when ``gpu_id`` does not refer to an existing device.
    """
    import pynvml
    pynvml.nvmlInit()
    try:
        if gpu_id < 0 or gpu_id >= pynvml.nvmlDeviceGetCount():
            print(r'gpu_id {} 对应的显卡不存在!'.format(gpu_id))
            return 0, 0, 0
        handler = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
        meminfo = pynvml.nvmlDeviceGetMemoryInfo(handler)
        total = round(meminfo.total / 1024 / 1024, 2)
        used = round(meminfo.used / 1024 / 1024, 2)
        free = round(meminfo.free / 1024 / 1024, 2)
        return total, used, free
    finally:
        # nvmlInit acquires an NVML session; the original never released it,
        # leaking a session per call. Always shut down, even on early return.
        pynvml.nvmlShutdown()
def get_cpu_mem_info():
    """Return host memory statistics, in MB.

    :return: tuple ``(mem_total, mem_free, mem_process_used)`` — total machine
             memory, currently available memory, and the RSS of the current
             process, all in MB rounded to 2 decimals.
    """
    # Take a single snapshot so total/available come from the same instant
    # (the original called psutil.virtual_memory() twice, allowing the two
    # figures to describe different moments in time).
    vm = psutil.virtual_memory()
    mem_total = round(vm.total / 1024 / 1024, 2)
    mem_free = round(vm.available / 1024 / 1024, 2)
    mem_process_used = round(psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024, 2)
    return mem_total, mem_free, mem_process_used