feature/vue support vue3+vite #763

Open · wants to merge 3 commits into main
24 changes: 24 additions & 0 deletions demo-vue/.gitignore
@@ -0,0 +1,24 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*

node_modules
dist
dist-ssr
*.local

# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
13 changes: 13 additions & 0 deletions demo-vue/index.html
@@ -0,0 +1,13 @@
<!doctype html>
<html lang="en" class="w-full h-full">
<head>
<meta charset="UTF-8" />
<link rel="icon" type="image/svg+xml" href="/vite.svg" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Vite + Vue</title>
</head>
<body class="w-full h-full">
<div id="app" class="w-full h-full"></div>
<script type="module" src="/src/main.js"></script>
</body>
</html>
26 changes: 26 additions & 0 deletions demo-vue/package.json
@@ -0,0 +1,26 @@
{
"name": "demo-vue",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "vite build",
"preview": "vite preview"
},
"dependencies": {
"npyjs": "^0.6.0",
"onnxruntime-web": "^1.18.0",
"underscore": "^1.13.6",
"vue": "^3.4.21"
},
"devDependencies": {
"@types/underscore": "^1.11.15",
"@vitejs/plugin-vue": "^5.0.4",
"autoprefixer": "^10.4.19",
"postcss": "^8.4.38",
"tailwindcss": "^3.4.4",
"vite": "^5.3.1",
"vite-plugin-cross-origin-isolation": "^0.1.6"
}
}
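With this package.json, the demo is presumably run with the standard Vite scripts: npm install, then npm run dev for the dev server, and npm run build / npm run preview for a production build. The SAM ONNX model and the precomputed image embedding referenced in App.vue are assumed to sit under demo-vue/src/assets/ alongside dogs.jpg.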
6 changes: 6 additions & 0 deletions demo-vue/postcss.config.js
@@ -0,0 +1,6 @@
export default {
plugins: {
tailwindcss: {},
autoprefixer: {},
},
}
215 changes: 215 additions & 0 deletions demo-vue/src/App.vue
@@ -0,0 +1,215 @@
<script setup lang="ts">
import npyjs from "npyjs";
import { InferenceSession, Tensor } from "onnxruntime-web";
import * as ort from "onnxruntime-web";
import * as _ from "underscore";
import { onBeforeMount, ref } from "vue";
import { onnxMaskToImage } from "./maskUtils";

// Demo image, its precomputed SAM image embedding, and the exported quantized ONNX mask decoder.
const IMAGE_PATH = "src/assets/dogs.jpg";
const IMAGE_EMBEDDING = "src/assets/dogs_embedding.npy";
const MODEL_DIR = "src/assets/sam_onnx_quantized_example.onnx";

// Load a .npy file and wrap it in an ONNX Runtime tensor.
const loadNpyTensor = async (tensorFile: string, dType: string) => {
let npLoader = new npyjs();
const npArray = await npLoader.load(tensorFile);
const tensor = new ort.Tensor(dType, npArray.data, npArray.shape);
return tensor;
};

const model = ref<InferenceSession | null>(null);
const initModel = async () => {
try {
if (MODEL_DIR === undefined) return;
const URL: string = MODEL_DIR;
// Serve the ORT wasm assets from a CDN (version matching the onnxruntime-web dependency),
// or download them and place them in the src folder instead.
ort.env.wasm.wasmPaths = "https://cdn.jsdelivr.net/npm/onnxruntime-web@1.18.0/dist/";
model.value = await InferenceSession.create(URL);
} catch (e) {
console.log(e);
}
};
const clicks = ref();

const handleImageScale = (image: HTMLImageElement) => {
// Input images to SAM must be resized so the longest side is 1024
const LONG_SIDE_LENGTH = 1024;
let w = image.naturalWidth;
let h = image.naturalHeight;
const samScale = LONG_SIDE_LENGTH / Math.max(h, w);
return { height: h, width: w, samScale };
};

const imageSrc = ref();
const modelScale = ref();
const shouldFitToWidth = ref();
const loadImage = async (url: string) => {
try {
const img = new Image();
img.src = url;
img.onload = () => {
const { height, width, samScale } = handleImageScale(img);
modelScale.value = {
height: height, // original image height
width: width, // original image width
samScale: samScale, // scaling factor for the image after resizing its longest side to 1024
};
img.width = width;
img.height = height;
const imageAspectRatio = width / height;
const screenAspectRatio = window.innerWidth / window.innerHeight;
shouldFitToWidth.value = imageAspectRatio > screenAspectRatio;
imageSrc.value = img;
};
} catch (error) {
console.log(error);
}
};
const modelData = ({ clicks, tensor, modelScale }: any) => {
const imageEmbedding = tensor;
let pointCoords;
let pointLabels;
let pointCoordsTensor;
let pointLabelsTensor;

// Check there are input click prompts
if (clicks) {
let n = clicks.length;

// If there is no box input, a single padding point with
// label -1 and coordinates (0.0, 0.0) should be concatenated
// so initialize the array to support (n + 1) points.
pointCoords = new Float32Array(2 * (n + 1));
pointLabels = new Float32Array(n + 1);

// Add clicks and scale to what SAM expects
for (let i = 0; i < n; i++) {
pointCoords[2 * i] = clicks[i].x * modelScale.samScale;
pointCoords[2 * i + 1] = clicks[i].y * modelScale.samScale;
pointLabels[i] = clicks[i].clickType;
}

// Add in the extra point/label when only clicks and no box
// The extra point is at (0, 0) with label -1
pointCoords[2 * n] = 0.0;
pointCoords[2 * n + 1] = 0.0;
pointLabels[n] = -1.0;

// Create the tensor
pointCoordsTensor = new Tensor("float32", pointCoords, [1, n + 1, 2]);
pointLabelsTensor = new Tensor("float32", pointLabels, [1, n + 1]);
}
const imageSizeTensor = new Tensor("float32", [
modelScale.height,
modelScale.width,
]);

if (pointCoordsTensor === undefined || pointLabelsTensor === undefined)
return;

// There is no previous mask, so default to an empty tensor
const maskInput = new Tensor(
"float32",
new Float32Array(256 * 256),
[1, 1, 256, 256]
);
// There is no previous mask, so default to 0
const hasMaskInput = new Tensor("float32", [0]);

return {
image_embeddings: imageEmbedding,
point_coords: pointCoordsTensor,
point_labels: pointLabelsTensor,
orig_im_size: imageSizeTensor,
mask_input: maskInput,
has_mask_input: hasMaskInput,
};
};

const handleMouseMove = _.throttle(async (e: any) => {
let el = e.target;
const rect = el.getBoundingClientRect();
let x = e.clientX - rect.left;
let y = e.clientY - rect.top;
const imageScale = imageSrc.value ? imageSrc.value.width / el.offsetWidth : 1;
x *= imageScale;
y *= imageScale;
clicks.value = [{ x: x, y: y, clickType: 1 }];
await runONNX();
}, 15);

const handleMouseout = async () => {
clicks.value = null;
predictImg.value = null;
};
const runONNX = async () => {
try {
if (
model.value === null ||
clicks.value === null ||
tensor.value === null ||
modelScale.value === null
)
return;
else {
// Prepare the model input in the correct format for SAM.
// The modelData function is defined above in this component.
const feeds = modelData({
clicks:clicks.value,
tensor:tensor.value,
modelScale:modelScale.value,
});
if (feeds === undefined) return;
// Run the SAM ONNX model with the feeds returned from modelData()
const results = await model.value.run(feeds);
const output = results[model.value.outputNames[0]];
// The predicted mask returned from the ONNX model is an array which is
// rendered as an HTML image using onnxMaskToImage() from maskUtils.tsx.
predictImg.value = onnxMaskToImage(output.data, output.dims[2], output.dims[3]).src
}
} catch (e) {
console.error("Error running ONNX model")
console.log(e);
}
};

const predictImg = ref();

const tensor = ref<ort.Tensor | null>(null);
onBeforeMount(async () => {
await loadImage(IMAGE_PATH)
await initModel();
tensor.value = await loadNpyTensor(IMAGE_EMBEDDING, "float32")
});

</script>

<template>
<div class="flex items-center justify-center w-full h-full">
<div class="flex items-center justify-center relative w-[90%] h-[90%]">
<img :src="IMAGE_PATH" :class="shouldFitToWidth?'w-full': 'h-full'"
@mousemove="handleMouseMove"
@mouseout="handleMouseout"
/>
<img :src='predictImg' class="absolute opacity-40 pointer-events-none"/>
</div>
</div>
</template>

<style scoped>
.logo {
height: 6em;
padding: 1.5em;
will-change: filter;
transition: filter 300ms;
}
.logo:hover {
filter: drop-shadow(0 0 2em #646cffaa);
}
.logo.vue:hover {
filter: drop-shadow(0 0 2em #42b883aa);
}
</style>
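A note on what App.vue consumes: only SAM's prompt encoder / mask decoder runs in the browser; dogs_embedding.npy has to be precomputed offline with SAM's image encoder for dogs.jpg. As a minimal sketch (not part of this diff, and assuming the usual [1, 256, 64, 64] SAM embedding shape), the onBeforeMount hook could sanity-check the loaded tensor before storing it:

// Hypothetical check, not in the PR: confirm the .npy looks like a SAM image embedding.
const embedding = await loadNpyTensor(IMAGE_EMBEDDING, "float32");
const [, channels, embedH, embedW] = embedding.dims;
if (channels !== 256 || embedH !== 64 || embedW !== 64) {
  console.warn("Unexpected embedding shape [" + embedding.dims.join(", ") + "]; expected [1, 256, 64, 64]");
}
tensor.value = embedding;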
Binary file added demo-vue/src/assets/dogs.jpg
5 changes: 5 additions & 0 deletions demo-vue/src/main.js
@@ -0,0 +1,5 @@
import { createApp } from 'vue'
import './style.css'
import App from './App.vue'

createApp(App).mount('#app')
47 changes: 47 additions & 0 deletions demo-vue/src/maskUtils.tsx
@@ -0,0 +1,47 @@
// Copyright (c) Meta Platforms, Inc. and affiliates.
// All rights reserved.

// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.

// Convert the onnx model mask prediction to ImageData
function arrayToImageData(input: any, width: number, height: number) {
const [r, g, b, a] = [0, 114, 189, 255]; // the mask's blue color
const arr = new Uint8ClampedArray(4 * width * height).fill(0);
for (let i = 0; i < input.length; i++) {

// Threshold the onnx model mask prediction at 0.0
// This is equivalent to thresholding the mask using predictor.model.mask_threshold
// in python
if (input[i] > 0.0) {
arr[4 * i + 0] = r;
arr[4 * i + 1] = g;
arr[4 * i + 2] = b;
arr[4 * i + 3] = a;
}
}
return new ImageData(arr, height, width);
}

// Use a Canvas element to produce an image from ImageData
function imageDataToImage(imageData: ImageData) {
const canvas = imageDataToCanvas(imageData);
const image = new Image();
image.src = canvas.toDataURL();
return image;
}

// Canvas elements can be created from ImageData
function imageDataToCanvas(imageData: ImageData) {
const canvas = document.createElement("canvas");
const ctx = canvas.getContext("2d");
canvas.width = imageData.width;
canvas.height = imageData.height;
ctx?.putImageData(imageData, 0, 0);
return canvas;
}

// Convert the onnx model mask output to an HTMLImageElement
export function onnxMaskToImage(input: any, width: number, height: number) {
return imageDataToImage(arrayToImageData(input, width, height));
}
3 changes: 3 additions & 0 deletions demo-vue/src/style.css
@@ -0,0 +1,3 @@
@tailwind base;
@tailwind components;
@tailwind utilities;
8 changes: 8 additions & 0 deletions demo-vue/tailwind.config.js
@@ -0,0 +1,8 @@
/** @type {import('tailwindcss').Config} */
module.exports = {
content: ['./index.html', './src/**/*.{vue,js,ts,jsx,tsx}'],
theme: {
extend: {},
},
plugins: [],
}
8 changes: 8 additions & 0 deletions demo-vue/vite.config.js
@@ -0,0 +1,8 @@
import { defineConfig } from 'vite'
import vue from '@vitejs/plugin-vue'
import crossOriginIsolation from 'vite-plugin-cross-origin-isolation'

// https://vitejs.dev/config/
export default defineConfig({
plugins: [vue(), crossOriginIsolation()],
})
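The vite-plugin-cross-origin-isolation plugin is here because onnxruntime-web's multi-threaded WASM backend relies on SharedArrayBuffer, which browsers only expose on cross-origin-isolated pages. A rough equivalent, as a sketch for the dev server only (a production host would have to send the same headers itself), is to set the two headers directly in the Vite config:

import { defineConfig } from 'vite'
import vue from '@vitejs/plugin-vue'

// Sketch: cross-origin isolation headers set by hand instead of via the plugin.
export default defineConfig({
  plugins: [vue()],
  server: {
    headers: {
      'Cross-Origin-Opener-Policy': 'same-origin',
      'Cross-Origin-Embedder-Policy': 'require-corp',
    },
  },
})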