[Transformers] Add utils.py in tools (#1402)
zhentaoyu authored Mar 21, 2024
1 parent ec6c27d commit 756646b
Showing 8 changed files with 107 additions and 53 deletions.
44 changes: 0 additions & 44 deletions intel_extension_for_transformers/neural_chat/utils/common.py
@@ -69,47 +69,3 @@ def is_hf_model(model_name_or_path):

def supported_gpus():
    return ['flex', 'max', 'arc']

def get_gpu_family():
    '''Get the GPU device family.

    Returns 'flex' | 'max' | 'arc' | 'no_gpu', or raises AssertionError for an
    unsupported device. Note: this function needs to import
    intel_extension_for_pytorch. Common device names:
        'Intel(R) Data Center GPU Flex 170'
        'Intel(R) Data Center GPU Max 1100'
        'Intel(R) Arc(TM) A770 Graphics'
    '''

    import intel_extension_for_pytorch as ipex
    if not (hasattr(torch, "xpu") and torch.xpu.is_available()):
        return 'no_gpu'

    name = torch.xpu.get_device_name()
    if 'GPU Flex' in name:
        result = 'flex'
    elif 'GPU Max' in name:
        result = 'max'
    elif 'Arc(TM)' in name:
        result = 'arc'
    else:
        assert False, "Unsupported GPU device: {}".format(name)

    if result not in supported_gpus():
        assert False, "Unsupported GPU device: {}".format(name)
    else:
        return result

_autoround_available = importlib.util.find_spec("auto_round") is not None
_autoround_version = "N/A"
if _autoround_available:
    try:
        _autoround_version = importlib_metadata.version("auto_round")
    except importlib_metadata.PackageNotFoundError:
        _autoround_available = False

def is_autoround_available():
    return _autoround_available
101 changes: 101 additions & 0 deletions intel_extension_for_transformers/tools/utils.py
@@ -0,0 +1,101 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2024 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utility."""

import importlib
import importlib.util  # explicit, so importlib.util.find_spec is always bound
import sys
import torch

if sys.version_info < (3, 8):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata

try:
    import habana_frameworks.torch.hpu as hthpu  # noqa: F401 -- probe for Gaudi/HPU support
    is_hpu_available = True
except ImportError:
    is_hpu_available = False

def supported_gpus():
    return ['flex', 'max', 'arc']

def get_gpu_family():
    '''Get the GPU device family.

    Returns 'flex' | 'max' | 'arc' | 'no_gpu', or raises AssertionError for an
    unsupported device. Note: this function needs to import
    intel_extension_for_pytorch. Common device names:
        'Intel(R) Data Center GPU Flex 170'
        'Intel(R) Data Center GPU Max 1100'
        'Intel(R) Arc(TM) A770 Graphics'
    '''

    # Importing IPEX registers the XPU backend with torch as a side effect.
    import intel_extension_for_pytorch as ipex  # noqa: F401
    if not (hasattr(torch, "xpu") and torch.xpu.is_available()):
        return 'no_gpu'

    name = torch.xpu.get_device_name()
    if 'GPU Flex' in name:
        result = 'flex'
    elif 'GPU Max' in name:
        result = 'max'
    elif 'Arc(TM)' in name:
        result = 'arc'
    else:
        assert False, "Unsupported GPU device: {}".format(name)

    if result not in supported_gpus():
        assert False, "Unsupported GPU device: {}".format(name)
    else:
        return result

_ipex_available = importlib.util.find_spec("intel_extension_for_pytorch") is not None
_ipex_version = "N/A"
if _ipex_available:
    try:
        _ipex_version = importlib_metadata.version("intel_extension_for_pytorch")
    except importlib_metadata.PackageNotFoundError:
        _ipex_available = False

def is_ipex_available():
    return _ipex_available

_autoround_available = importlib.util.find_spec("auto_round") is not None
_autoround_version = "N/A"
if _autoround_available:
    try:
        _autoround_version = importlib_metadata.version("auto_round")
    except importlib_metadata.PackageNotFoundError:
        _autoround_available = False

def is_autoround_available():
    return _autoround_available

def get_device_type():
    if torch.cuda.is_available():
        device = "cuda"
    elif is_hpu_available:
        device = "hpu"
    elif is_ipex_available() and torch.xpu.is_available():
        device = "xpu"
    else:
        device = "cpu"
    return device
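
For context, a minimal usage sketch for the new module (not part of this commit; assumes intel_extension_for_transformers and its dependencies are installed):

import torch
from intel_extension_for_transformers.tools.utils import get_device_type, get_gpu_family

# Pick the best available backend: "cuda" | "hpu" | "xpu" | "cpu".
device = torch.device(get_device_type())
print("selected device:", device)

# get_gpu_family() imports intel_extension_for_pytorch internally (so it
# raises ImportError without IPEX) and returns 'no_gpu' when no XPU is visible.
if device.type == "xpu":
    print("gpu family:", get_gpu_family())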
@@ -29,7 +29,7 @@
from peft import PeftModel, get_peft_model, prepare_model_for_kbit_training
import time
import logging
from intel_extension_for_transformers.neural_chat.utils.common import is_hpu_available
from intel_extension_for_transformers.tools.utils import is_hpu_available

logger = logging.getLogger(__name__)

@@ -60,8 +60,8 @@
from transformers.integrations.deepspeed import (
    is_deepspeed_available,
)
from intel_extension_for_transformers.neural_chat.utils.common import is_hpu_available
from intel_extension_for_transformers.neural_chat.utils.common import get_device_type
from intel_extension_for_transformers.tools.utils import is_hpu_available
from intel_extension_for_transformers.tools.utils import get_device_type


if is_bitsandbytes_available():
@@ -26,7 +26,7 @@
from neural_compressor.adaptor.torch_utils.model_wrapper import WeightOnlyLinear
from neural_compressor.utils.utility import LazyImport
from neural_compressor.config import PostTrainingQuantConfig
from intel_extension_for_transformers.neural_chat.utils.common import is_ipex_available, is_autoround_available
from intel_extension_for_transformers.tools.utils import is_ipex_available, is_autoround_available
from transformers import AutoTokenizer

if is_ipex_available():
@@ -32,7 +32,7 @@
from safetensors.torch import load_file as safe_load_file
from transformers import PreTrainedModel
import importlib
from intel_extension_for_transformers.neural_chat.utils.common import is_hpu_available
from intel_extension_for_transformers.tools.utils import is_hpu_available


def is_peft_available():
3 changes: 0 additions & 3 deletions requirements.txt
@@ -1,6 +1,3 @@
fastapi
py-cpuinfo
setuptools>=65
setuptools_scm[toml]>=6.2
uvicorn
yacs
2 changes: 1 addition & 1 deletion tests/CI/test_weight_only_gpu.py
@@ -24,7 +24,7 @@
from intel_extension_for_transformers.transformers import GPTQConfig, RtnConfig
from math import isclose
from transformers import AutoTokenizer
from intel_extension_for_transformers.neural_chat.utils.common import get_gpu_family, is_ipex_available
from intel_extension_for_transformers.tools.utils import get_gpu_family, is_ipex_available
from torch.utils.data import DataLoader


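
For downstream code that must run on releases from both sides of this move, a hedged compatibility sketch (not part of this commit):

try:
    # New location introduced by this commit.
    from intel_extension_for_transformers.tools.utils import is_hpu_available
except ImportError:
    # Older releases kept this helper in neural_chat.utils.common.
    from intel_extension_for_transformers.neural_chat.utils.common import is_hpu_available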
