A React provider and hooks for integrating Hugging Face Transformers.js into your React applications. The library provides loading states, error handling, model caching, React Suspense support, and image processing capabilities including segmentation, classification, and captioning.
npm install huggingface-transformers-react
yarn add huggingface-transformers-react
pnpm add huggingface-transformers-react
WebLLM support is built in and loaded automatically from a CDN when enabled; no additional installation is required.
Simply set `enableWebLLM={true}` on the `TransformersProvider`:
<TransformersProvider enableWebLLM={true}>
<App />
</TransformersProvider>
Note: WebLLM requires a WebGPU-capable browser (Chrome/Edge 113+ recommended).
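If you only want WebLLM where it can actually run, you can feature-detect WebGPU before enabling it (a small sketch; `navigator.gpu` is the standard WebGPU entry point, and the prop usage mirrors the example above):

```jsx
import React from 'react';
import { TransformersProvider } from 'huggingface-transformers-react';
import App from './App';

function Root() {
  // navigator.gpu is only defined in WebGPU-capable browsers;
  // the typeof guard keeps this safe during server-side rendering.
  const hasWebGPU = typeof navigator !== 'undefined' && 'gpu' in navigator;

  return (
    <TransformersProvider enableWebLLM={hasWebGPU}>
      <App />
    </TransformersProvider>
  );
}

export default Root;
```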
Wrap your app with the TransformersProvider:
import React from 'react';
import { TransformersProvider } from 'huggingface-transformers-react';
import App from './App';
function Root() {
return (
<TransformersProvider>
<App />
</TransformersProvider>
);
}
export default Root;
import React, { useEffect, useState } from 'react';
import { useTransformers } from 'huggingface-transformers-react';
function SentimentAnalyzer() {
const { libraryStatus, analyzeSentiment } = useTransformers();
const [result, setResult] = useState(null);
const [text, setText] = useState('I love this library!');
const handleAnalyze = async () => {
if (libraryStatus === 'ready') {
const sentiment = await analyzeSentiment(text);
setResult(sentiment);
}
};
return (
<div>
<textarea
value={text}
onChange={(e) => setText(e.target.value)}
placeholder="Enter text to analyze..."
/>
<button
onClick={handleAnalyze}
disabled={libraryStatus !== 'ready'}
>
{libraryStatus === 'loading' ? 'Loading AI...' : 'Analyze Sentiment'}
</button>
{result && (
<div>
<strong>Sentiment:</strong> {result[0].label} ({result[0].score.toFixed(2)})
</div>
)}
</div>
);
}
For the smoothest user experience, use with React Suspense:
import React, { Suspense } from 'react';
import { TransformersProvider, useTransformersReady, useTransformers } from 'huggingface-transformers-react';
function AIFeature() {
useTransformersReady(); // This will suspend until ready
const { analyzeSentiment } = useTransformers();
// Component will only render when transformers is ready
return (
<div>
<h2>AI-Powered Features</h2>
{/* Your AI features here */}
</div>
);
}
function App() {
return (
<TransformersProvider>
<Suspense fallback={<div>🤖 Loading AI models...</div>}>
<AIFeature />
</Suspense>
</TransformersProvider>
);
}
`<TransformersProvider>`

The main provider component that manages the Transformers.js library.
| Prop | Type | Default | Description |
|---|---|---|---|
| `children` | `ReactNode` | - | React children to render |
| `moduleUrl` | `string` | CDN URL | Custom URL for the transformers library |
| `loadTimeout` | `number` | `60000` | Timeout in milliseconds for loading |
| `maxRetries` | `number` | `3` | Maximum retry attempts |
| `nonce` | `string` | - | CSP nonce for script tags |
| `onLibraryError` | `(error: Error) => void` | - | Library loading error callback |
| `onModelError` | `(modelId: string, error: Error) => void` | - | Model loading error callback |
<TransformersProvider
moduleUrl="/static/transformers.esm.js"
loadTimeout={30000}
maxRetries={5}
nonce={cspNonce}
onLibraryError={(error) => console.error('Library failed:', error)}
onModelError={(modelId, error) => console.error(`Model ${modelId} failed:`, error)}
>
<App />
</TransformersProvider>
`useTransformers()`

Hook to access the transformers context and functionality.
interface TransformersContextValue {
// Library State
isLibraryLoaded: boolean;
libraryStatus: 'idle' | 'loading' | 'ready' | 'error';
libraryError: Error | null;
// Model State
models: Record<string, any>;
modelStatus: Record<string, ModelStatus>;
modelErrors: Record<string, Error | null>;
// Actions
loadModel: <T>(modelId: string, task?: string, retry?: number) => Promise<T>;
unloadModel: (modelId: string) => void;
analyzeSentiment: (text: string, customModel?: string, options?: any) => Promise<SentimentResult[]>;
transcribeAudio: (audio: Blob | File, options?: any) => Promise<{ text: string }>;
// Image Processing
segmentImage: (image: string | File | Blob, customModel?: string, options?: any) => Promise<ImageSegmentationResult[]>;
captionImage: (image: string | File | Blob, customModel?: string, options?: any) => Promise<ImageCaptionResult[]>;
classifyImage: (image: string | File | Blob, customModel?: string, options?: any) => Promise<ImageClassificationResult[]>;
// Suspense
readyPromise: Promise<void>;
}
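For example, the per-model state maps can drive status UI (a small sketch using the fields above; the `ModelStatusBadge` component is just an illustration):

```jsx
import React from 'react';
import { useTransformers } from 'huggingface-transformers-react';

function ModelStatusBadge({ modelId }) {
  // modelStatus and modelErrors are keyed by model ID, as in the interface above
  const { modelStatus, modelErrors } = useTransformers();
  const status = modelStatus[modelId] ?? 'not loaded';
  const error = modelErrors[modelId];

  return (
    <span>
      {modelId}: {status}
      {error ? ` (${error.message})` : ''}
    </span>
  );
}
```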
`useTransformersReady()`

Suspense-friendly hook that suspends rendering until the library is ready.
function MyAIComponent() {
useTransformersReady(); // Suspends until ready
const { analyzeSentiment } = useTransformers();
// Safe to use AI features here
return <div>AI features ready!</div>;
}
Load and use custom models for specific tasks:
function CustomModelExample() {
const { loadModel, libraryStatus } = useTransformers();
const [result, setResult] = useState(null);
const useCustomModel = async () => {
if (libraryStatus === 'ready') {
// Load a specific model
const classifier = await loadModel(
'cardiffnlp/twitter-roberta-base-sentiment-latest',
'sentiment-analysis'
);
const result = await classifier('This is amazing!');
setResult(result);
}
};
return (
<div>
<button onClick={useCustomModel}>
Use Custom Model
</button>
{result && <pre>{JSON.stringify(result, null, 2)}</pre>}
</div>
);
}
The library automatically handles audio format conversion for Whisper models, converting audio blobs to the required Float32Array format with proper resampling to 16kHz.
function AudioTranscription() {
const { transcribeAudio, libraryStatus } = useTransformers();
const [transcription, setTranscription] = useState('');
const [loading, setLoading] = useState(false);
const handleFileUpload = async (event) => {
const file = event.target.files[0];
if (file && libraryStatus === 'ready') {
setLoading(true);
try {
// Library automatically converts audio to proper format for Whisper
const result = await transcribeAudio(file);
setTranscription(result.text);
} catch (error) {
console.error('Transcription failed:', error);
} finally {
setLoading(false);
}
}
};
// Voice recording example
const [recording, setRecording] = useState(false);
const [mediaRecorder, setMediaRecorder] = useState(null);
const startRecording = async () => {
try {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const recorder = new MediaRecorder(stream);
const chunks = [];
recorder.ondataavailable = (e) => chunks.push(e.data);
recorder.onstop = async () => {
const audioBlob = new Blob(chunks, { type: 'audio/webm' });
const result = await transcribeAudio(audioBlob);
setTranscription(result.text);
stream.getTracks().forEach(track => track.stop());
};
recorder.start();
setMediaRecorder(recorder);
setRecording(true);
} catch (error) {
console.error('Recording failed:', error);
}
};
const stopRecording = () => {
if (mediaRecorder) {
mediaRecorder.stop();
setMediaRecorder(null);
setRecording(false);
}
};
return (
<div>
<div>
<input
type="file"
accept="audio/*"
onChange={handleFileUpload}
disabled={libraryStatus !== 'ready' || loading}
/>
<button
onClick={recording ? stopRecording : startRecording}
disabled={libraryStatus !== 'ready' || loading}
>
{recording ? 'Stop Recording' : 'Start Recording'}
</button>
</div>
{loading && <p>Processing audio...</p>}
{transcription && (
<div>
<h3>Transcription:</h3>
<p>{transcription}</p>
</div>
)}
</div>
);
}
The `transcribeAudio` function automatically handles decoding the audio, converting it to the `Float32Array` format Whisper expects, and resampling it to 16 kHz.
Supported audio formats: WAV, MP3, MP4, WebM, OGG, and any format supported by the browser's AudioContext.
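For reference, a conversion along those lines can be reproduced with the Web Audio API. This is only a sketch of the idea, not the library's internal code, and `blobToWhisperInput` is a made-up helper name:

```js
// Illustrative only: roughly the kind of conversion transcribeAudio performs
// internally (decode -> mono Float32Array resampled to 16 kHz).
async function blobToWhisperInput(blob) {
  const arrayBuffer = await blob.arrayBuffer();

  // Decode the compressed audio with a regular AudioContext
  const audioCtx = new AudioContext();
  const decoded = await audioCtx.decodeAudioData(arrayBuffer);
  await audioCtx.close();

  // Resample (and downmix to mono) with an OfflineAudioContext at 16 kHz
  const targetRate = 16000;
  const offline = new OfflineAudioContext(1, Math.ceil(decoded.duration * targetRate), targetRate);
  const source = offline.createBufferSource();
  source.buffer = decoded;
  source.connect(offline.destination);
  source.start();
  const resampled = await offline.startRendering();

  // Whisper pipelines expect a mono Float32Array of samples
  return resampled.getChannelData(0);
}
```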
The library provides comprehensive image processing capabilities including segmentation, classification, and captioning.
function ImageProcessingExample() {
const { segmentImage, classifyImage, captionImage, libraryStatus } = useTransformers();
const [image, setImage] = useState(null);
const [results, setResults] = useState({});
const handleImageUpload = (event) => {
const file = event.target.files[0];
if (file) {
setImage(file);
setResults({});
}
};
const processImage = async (type) => {
if (!image || libraryStatus !== 'ready') return;
try {
let result;
switch (type) {
case 'segment':
// Segment objects in the image
result = await segmentImage(image);
break;
case 'classify':
// Classify the image with top 5 results
result = await classifyImage(image, 'Xenova/vit-base-patch16-224', { top_k: 5 });
break;
case 'caption':
// Generate a caption for the image
result = await captionImage(image);
break;
}
setResults(prev => ({ ...prev, [type]: result }));
} catch (error) {
console.error(`${type} failed:`, error);
}
};
return (
<div>
<input
type="file"
accept="image/*"
onChange={handleImageUpload}
disabled={libraryStatus !== 'ready'}
/>
{image && (
<div>
<img
src={URL.createObjectURL(image)}
alt="Preview"
style={{ maxWidth: '300px', maxHeight: '300px' }}
/>
<div>
<button onClick={() => processImage('segment')}>
Segment Objects
</button>
<button onClick={() => processImage('classify')}>
Classify Image
</button>
<button onClick={() => processImage('caption')}>
Generate Caption
</button>
</div>
</div>
)}
{results.caption && (
<div>
<h3>Caption:</h3>
<p>{results.caption[0].generated_text}</p>
</div>
)}
{results.classify && (
<div>
<h3>Classification:</h3>
{results.classify.map((item, i) => (
<div key={i}>
{item.label}: {(item.score * 100).toFixed(1)}%
</div>
))}
</div>
)}
{results.segment && (
<div>
<h3>Segmentation:</h3>
<p>Found {results.segment.length} objects</p>
</div>
)}
</div>
);
}
- Image Segmentation: detect and segment objects with mask generation. Default model: `Xenova/detr-resnet-50-panoptic`.
- Image Classification: classify scenes and objects with confidence scores. Default model: `Xenova/vit-base-patch16-224`. Pass `top_k` to limit the number of results.
- Image Captioning: generate descriptive text from images. Default model: `Xenova/vit-gpt2-image-captioning`.
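For reference, each helper takes the image plus an optional custom model ID and options object, so they can also be called directly inside an async handler (a minimal sketch using the model IDs and `top_k` option listed above; `file` is assumed to be a `File` or `Blob`):

```jsx
// Sketch: calling the image helpers from useTransformers() directly.
// `file` is assumed to be a File or Blob from an <input type="file">.
const segments = await segmentImage(file);
const labels = await classifyImage(file, 'Xenova/vit-base-patch16-224', { top_k: 3 });
const captions = await captionImage(file);
```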
You can list the loaded models and load or unload them on demand with `loadModel` and `unloadModel`:
function ModelManager() {
const { models, modelStatus, loadModel, unloadModel } = useTransformers();
const loadSentimentModel = () => {
loadModel('Xenova/distilbert-base-uncased-finetuned-sst-2-english', 'sentiment-analysis');
};
const unloadSentimentModel = () => {
unloadModel('Xenova/distilbert-base-uncased-finetuned-sst-2-english');
};
return (
<div>
<h3>Loaded Models:</h3>
{Object.entries(models).map(([modelId, model]) => (
<div key={modelId}>
<span>{modelId}</span>
<span>Status: {modelStatus[modelId]}</span>
<button onClick={() => unloadModel(modelId)}>Unload</button>
</div>
))}
<button onClick={loadSentimentModel}>
Load Sentiment Model
</button>
</div>
);
}
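Because loaded models are cached by ID, you can also preload a model as soon as the library is ready so the first user interaction doesn't wait on the download. A small sketch reusing the model ID above (`SentimentPreloader` is just an illustrative wrapper):

```jsx
import React, { useEffect } from 'react';
import { useTransformers } from 'huggingface-transformers-react';

function SentimentPreloader({ children }) {
  const { libraryStatus, loadModel } = useTransformers();

  useEffect(() => {
    if (libraryStatus === 'ready') {
      // Start the download early; later calls reuse the cached model
      loadModel('Xenova/distilbert-base-uncased-finetuned-sst-2-english', 'sentiment-analysis')
        .catch((error) => console.error('Preload failed:', error));
    }
  }, [libraryStatus, loadModel]);

  return children;
}
```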
If you're using CSP, you'll need to allow the transformers script:
<TransformersProvider nonce={cspNonce}>
<App />
</TransformersProvider>
You can self-host the transformers library:
<TransformersProvider moduleUrl="/static/transformers.esm.js">
<App />
</TransformersProvider>
function AppWithErrorHandling() {
const handleLibraryError = (error: Error) => {
console.error('Transformers library failed to load:', error);
// Report to error tracking service
};
const handleModelError = (modelId: string, error: Error) => {
console.error(`Model ${modelId} failed to load:`, error);
// Show user-friendly error message
};
return (
<TransformersProvider
onLibraryError={handleLibraryError}
onModelError={handleModelError}
>
<App />
</TransformersProvider>
);
}
Check out our examples directory for complete working examples.
We welcome contributions! Please see our Contributing Guide for details.
- `npm install`
- `npm run dev`
- `npm test`
- `npm run build`

Browser Compatibility
Audio transcription requires a browser with Web Audio API support (`AudioContext`); in-browser recording additionally requires `MediaRecorder` and `navigator.mediaDevices.getUserMedia`.
Library not loading: try increasing the `loadTimeout` prop.
Models failing to load: verify the model ID and inspect the error passed to the `onModelError` callback.
If you encounter any issues, please create an issue on GitHub.
MIT © Muhammad Dadu
Made with ❤️ by Muhammad Dadu