Your AI characters.
Your AI characters.
Real-time generative animation model.
Interactive. On-device.
Real-time generative animation model. Interactive. On-device.

0:01
Hanna

0:01
Hanna
import AnimationSDK
import SwiftUI
/// Chef-companion screen: renders the chef character in a kitchen scene and
/// swaps environments in response to the live conversation state.
struct ChefCompanionView: View {
    @State var chef: Character

    var body: some View {
        CharacterView(chef) { view in
            // Scene shown when the character first loads.
            view.loadEnvironment("modern_kitchen")
        } update: { state, view in
            // A confused user (above a 0.5 confidence threshold) moves the
            // scene to the pantry.
            if let emotion = state.emotion, emotion.primary == .confused, emotion.confidence > 0.5 {
                view.switchEnvironment("ingredient_pantry")
            }
            // Dessert talk moves the scene to the pastry station.
            if state.hasKeyword("dessert") {
                view.switchEnvironment("pastry_station")
            }
        }
        .frame(maxWidth: .infinity, maxHeight: .infinity)
        .onAppear {
            // One-time character configuration plus a spoken greeting.
            chef.enableRecipeMode(true)
            chef.enableTimerAlerts(true)
            chef.sayIntroPhrase()
        }
        .onTapGesture {
            chef.showTechnique()
        }
        .onReceive(chef.$dishCompleted) {
            chef.saySimilar("Voilà! Beautiful dish! Shall we plate it together?")
        }
        // NOTE(review): this unlabeled `onChange` taking a publisher appears to
        // be an AnimationSDK extension rather than stock SwiftUI's
        // `onChange(of:)` — confirm against the SDK. Old/new values are unused.
        .onChange(chef.$recipeStep) { _, _ in
            chef.goTo("prep_station")
        }
    }
}
/// Fitness-trainer screen: renders the trainer character in a gym scene and
/// swaps environments based on the user's detected energy and fatigue.
struct FitnessTrainerView: View {
    @State var trainer: Character

    var body: some View {
        CharacterView(trainer) { view in
            // Scene shown when the character first loads.
            view.loadEnvironment("gym_studio")
        } update: { state, view in
            // High-confidence energetic mood (above 0.7) moves the session
            // outdoors.
            if let emotion = state.emotion, emotion.primary == .energetic, emotion.confidence > 0.7 {
                view.switchEnvironment("outdoor_track")
            }
            // Strong fatigue signal (above 0.8) switches to the recovery room.
            if state.fatigueLevelDetected > 0.8 {
                view.switchEnvironment("recovery_room")
            }
        }
        .frame(maxWidth: .infinity, maxHeight: .infinity)
        .onAppear {
            // One-time character configuration plus a spoken greeting.
            trainer.enableMovementTracking(true)
            trainer.enableVoiceCommands(true)
            trainer.sayIntroPhrase()
        }
        .onTapGesture {
            trainer.demonstrateExercise()
        }
        .onReceive(trainer.$workoutCompleted) {
            trainer.saySimilar("Amazing job! You crushed it! Ready for cooldown?")
        }
        // NOTE(review): this unlabeled `onChange` taking a publisher appears to
        // be an AnimationSDK extension rather than stock SwiftUI's
        // `onChange(of:)` — confirm against the SDK. Old/new values are unused.
        .onChange(trainer.$exerciseID) { _, _ in
            trainer.goTo("exercise_mat")
        }
    }
}
/// Meditation-guide screen: renders the guide character in a calm scene and
/// swaps environments based on detected stress and breathing rhythm.
struct MeditationGuideView: View {
    @State var guide: Character

    var body: some View {
        CharacterView(guide) { view in
            // Scene shown when the character first loads.
            view.loadEnvironment("zen_garden")
        } update: { state, view in
            // Detected stress (above a 0.6 confidence threshold) moves the
            // scene to a calmer setting.
            if let emotion = state.emotion, emotion.primary == .stressed, emotion.confidence > 0.6 {
                view.switchEnvironment("calm_beach")
            }
            // Deep breathing transitions to the mountain peak.
            if state.breathingRhythm == .deep {
                view.switchEnvironment("mountain_peak")
            }
        }
        .frame(maxWidth: .infinity, maxHeight: .infinity)
        .onAppear {
            // One-time character configuration plus a spoken greeting.
            guide.enableBiometricMonitoring(true)
            guide.enableAmbientSounds(true)
            guide.sayIntroPhrase()
        }
        .onTapGesture {
            guide.sayWisdomWords()
        }
        .onReceive(guide.$sessionCompleted) {
            guide.saySimilar("Wonderful practice. How do you feel now?")
        }
        // NOTE(review): this unlabeled `onChange` taking a publisher appears to
        // be an AnimationSDK extension rather than stock SwiftUI's
        // `onChange(of:)` — confirm against the SDK. Old/new values are unused.
        .onChange(guide.$meditationPhase) { _, _ in
            guide.goTo("lotus_position")
        }
    }
}
import AnimationSDK
import SwiftUI
/// Chef-companion screen: renders the chef character in a kitchen scene and
/// swaps environments in response to the live conversation state.
struct ChefCompanionView: View {
    @State var chef: Character

    var body: some View {
        CharacterView(chef) { view in
            // Scene shown when the character first loads.
            view.loadEnvironment("modern_kitchen")
        } update: { state, view in
            // A confused user (above a 0.5 confidence threshold) moves the
            // scene to the pantry.
            if let emotion = state.emotion, emotion.primary == .confused, emotion.confidence > 0.5 {
                view.switchEnvironment("ingredient_pantry")
            }
            // Dessert talk moves the scene to the pastry station.
            if state.hasKeyword("dessert") {
                view.switchEnvironment("pastry_station")
            }
        }
        .frame(maxWidth: .infinity, maxHeight: .infinity)
        .onAppear {
            // One-time character configuration plus a spoken greeting.
            chef.enableRecipeMode(true)
            chef.enableTimerAlerts(true)
            chef.sayIntroPhrase()
        }
        .onTapGesture {
            chef.showTechnique()
        }
        .onReceive(chef.$dishCompleted) {
            chef.saySimilar("Voilà! Beautiful dish! Shall we plate it together?")
        }
        // NOTE(review): this unlabeled `onChange` taking a publisher appears to
        // be an AnimationSDK extension rather than stock SwiftUI's
        // `onChange(of:)` — confirm against the SDK. Old/new values are unused.
        .onChange(chef.$recipeStep) { _, _ in
            chef.goTo("prep_station")
        }
    }
}
/// Fitness-trainer screen: renders the trainer character in a gym scene and
/// swaps environments based on the user's detected energy and fatigue.
struct FitnessTrainerView: View {
    @State var trainer: Character

    var body: some View {
        CharacterView(trainer) { view in
            // Scene shown when the character first loads.
            view.loadEnvironment("gym_studio")
        } update: { state, view in
            // High-confidence energetic mood (above 0.7) moves the session
            // outdoors.
            if let emotion = state.emotion, emotion.primary == .energetic, emotion.confidence > 0.7 {
                view.switchEnvironment("outdoor_track")
            }
            // Strong fatigue signal (above 0.8) switches to the recovery room.
            if state.fatigueLevelDetected > 0.8 {
                view.switchEnvironment("recovery_room")
            }
        }
        .frame(maxWidth: .infinity, maxHeight: .infinity)
        .onAppear {
            // One-time character configuration plus a spoken greeting.
            trainer.enableMovementTracking(true)
            trainer.enableVoiceCommands(true)
            trainer.sayIntroPhrase()
        }
        .onTapGesture {
            trainer.demonstrateExercise()
        }
        .onReceive(trainer.$workoutCompleted) {
            trainer.saySimilar("Amazing job! You crushed it! Ready for cooldown?")
        }
        // NOTE(review): this unlabeled `onChange` taking a publisher appears to
        // be an AnimationSDK extension rather than stock SwiftUI's
        // `onChange(of:)` — confirm against the SDK. Old/new values are unused.
        .onChange(trainer.$exerciseID) { _, _ in
            trainer.goTo("exercise_mat")
        }
    }
}
/// Meditation-guide screen: renders the guide character in a calm scene and
/// swaps environments based on detected stress and breathing rhythm.
struct MeditationGuideView: View {
    @State var guide: Character

    var body: some View {
        CharacterView(guide) { view in
            // Scene shown when the character first loads.
            view.loadEnvironment("zen_garden")
        } update: { state, view in
            // Detected stress (above a 0.6 confidence threshold) moves the
            // scene to a calmer setting.
            if let emotion = state.emotion, emotion.primary == .stressed, emotion.confidence > 0.6 {
                view.switchEnvironment("calm_beach")
            }
            // Deep breathing transitions to the mountain peak.
            if state.breathingRhythm == .deep {
                view.switchEnvironment("mountain_peak")
            }
        }
        .frame(maxWidth: .infinity, maxHeight: .infinity)
        .onAppear {
            // One-time character configuration plus a spoken greeting.
            guide.enableBiometricMonitoring(true)
            guide.enableAmbientSounds(true)
            guide.sayIntroPhrase()
        }
        .onTapGesture {
            guide.sayWisdomWords()
        }
        .onReceive(guide.$sessionCompleted) {
            guide.saySimilar("Wonderful practice. How do you feel now?")
        }
        // NOTE(review): this unlabeled `onChange` taking a publisher appears to
        // be an AnimationSDK extension rather than stock SwiftUI's
        // `onChange(of:)` — confirm against the SDK. Old/new values are unused.
        .onChange(guide.$meditationPhase) { _, _ in
            guide.goTo("lotus_position")
        }
    }
}
Powering the next generation
of AI companions
Powering the next generation of AI companions
Real-time
On-device
3D
Radically simpler, faster animation unlocks deeper emotional connections, richer storytelling, and entirely new interactions at scale, and at a fraction of the cost.
The family of solutions
The family of
solutions
One stack. Infinite creative potential.
One stack. Infinite creative potential.
One stack. Infinite creative potential.
One stack. Infinite creative potential.

Ani-2 Model
Our proprietary on-device AI model generates
full-body 3D motion in real-time. No motion capture,
no cloud, no delay.
Our proprietary on-device AI model generates full-body 3D motion in real-time. No motion capture,
no cloud, no delay.
Our proprietary on-device AI model generates
full-body 3D motion in real-time. No motion capture, no cloud, no delay.
Inference speed: 2.5 ms/frame

Anichat Platform
Any character, any personality, any look.
If you can imagine them, you can bring them to life.
Video call them, stay in touch, share.
Any character, any personality, any look.
If you can imagine them, you can bring them to life. Video call them, stay in touch, share.
Any character, any personality, any look.
If you can imagine them, you can bring them to life. Video call them, stay in touch, share.
SOON
Interactive
Livestreaming
Interactive Livestreaming
Control your characters while streaming.
Let them interact with your audience
in real-time.
SOON
Content
Production Engine
Turn ideas into videos with unmatched speed,
character consistency, precise creative control,
and at a fraction of the cost.
Turn ideas into videos with unmatched speed, character consistency, precise creative control, and at a fraction of the cost.
Team
The Animation Inc. team is the original crew behind MSQRD, Meta Spark, Meta Face Tracker and Loóna. The remote team of 13 is distributed across London, Warsaw, Cyprus, and San Francisco. The company is backed by Elefund, True Ventures, Haystack and other prominent Silicon Valley–based venture capital firms.
The Animation Inc. team is the original crew behind MSQRD, Meta Spark, Meta Face Tracker and Loóna. The remote team of 13 is distributed across London, Warsaw, Cyprus, and San Francisco. The company is backed by Elefund, True Ventures, Haystack and other prominent Silicon Valley–based venture capital firms.
The Animation Inc. team is the original crew behind MSQRD, Meta Spark, Meta Face Tracker and Loóna. The remote team of 13 is distributed across London, Warsaw, Cyprus, and San Francisco. The company is backed by Elefund, True Ventures, Haystack and other prominent Silicon Valley–based venture capital firms.

Sergey Gonchar, CEO

Eugene Zatepyakin, CTO

Eugene Nevgen, CSO

Andrew Yanchurevich, CPO

Dmitry Doryn, CDO
Previously
Previously

Creators of MSQRD, one of the most popular AR apps with 100M users. Acquired by Meta. Scaled to 1B users on Instagram.
Creators of MSQRD, one of the most popular AR apps with 100M users. Acquired by Meta. Scaled to 1B users on Instagram.


Creators of the Apple Design Award–winning app Loóna, and of Loóna Spatial, an Apple Vision Pro launch partner.
Creators of the Apple Design Award–winning app Loóna, and of Loóna Spatial, an Apple Vision Pro launch partner.
Answers
What is Animation Inc?
Animation Inc. is a deeptech startup building real-time generative animation. We believe animation is the final frontier in crafting lifelike virtual characters and enriching large language model (LLM)-based experiences. In the era of generative AI, we're pushing boundaries to make virtual interactions more immersive and believable than ever.
How is Generative Video different from Animation AI?
While generative video delivers short pre-rendered scenes or clips, generative animation employs real-time AI to drive every movement and emotion dynamically. Characters become responsive and interactive, adapting instantly to your voice or text. One generates content. The other generates continuous presence.
Why is this development unique and critical?
This is the world’s first animation engine fully controlled by a neural network in real time on a device. It gives AI a face, emotions, and a living presence — something that can’t be achieved through motion capture or manual animation. Without it, AI remains abstract and disconnected from human experience.
Why does it matter for the world and humanity?
People connect with faces, gestures, and emotions — they build empathy and trust. Our technology adds that human layer to AI interaction. Avatars powered by Animation Inc. can teach languages, help with public speaking, provide customer support, and much more. In the future, this technology will become foundational for the metaverse and AGI — a world where AI doesn’t just speak, but lives and expresses itself through movement and emotion.
What is Animation Inc?
Animation Inc. is a deeptech startup building real-time generative animation. We believe animation is the final frontier in crafting lifelike virtual characters and enriching large language model (LLM)-based experiences. In the era of generative AI, we're pushing boundaries to make virtual interactions more immersive and believable than ever.
How is Generative Video different from Animation AI?
While generative video delivers short pre-rendered scenes or clips, generative animation employs real-time AI to drive every movement and emotion dynamically. Characters become responsive and interactive, adapting instantly to your voice or text. One generates content. The other generates continuous presence.
Why is this development unique and critical?
This is the world’s first animation engine fully controlled by a neural network in real time on a device. It gives AI a face, emotions, and a living presence — something that can’t be achieved through motion capture or manual animation. Without it, AI remains abstract and disconnected from human experience.
Why does it matter for the world and humanity?
People connect with faces, gestures, and emotions — they build empathy and trust. Our technology adds that human layer to AI interaction. Avatars powered by Animation Inc. can teach languages, help with public speaking, provide customer support, and much more. In the future, this technology will become foundational for the metaverse and AGI — a world where AI doesn’t just speak, but lives and expresses itself through movement and emotion.
What is Animation Inc?
Animation Inc. is a deeptech startup building real-time generative animation. We believe animation is the final frontier in crafting lifelike virtual characters and enriching large language model (LLM)-based experiences. In the era of generative AI, we're pushing boundaries to make virtual interactions more immersive and believable than ever.
How is Generative Video different from Animation AI?
While generative video delivers short pre-rendered scenes or clips, generative animation employs real-time AI to drive every movement and emotion dynamically. Characters become responsive and interactive, adapting instantly to your voice or text. One generates content. The other generates continuous presence.
Why is this development unique and critical?
This is the world’s first animation engine fully controlled by a neural network in real time on a device. It gives AI a face, emotions, and a living presence — something that can’t be achieved through motion capture or manual animation. Without it, AI remains abstract and disconnected from human experience.
Why does it matter for the world and humanity?
People connect with faces, gestures, and emotions — they build empathy and trust. Our technology adds that human layer to AI interaction. Avatars powered by Animation Inc. can teach languages, help with public speaking, provide customer support, and much more. In the future, this technology will become foundational for the metaverse and AGI — a world where AI doesn’t just speak, but lives and expresses itself through movement and emotion.

App Store
App Store
From text to presence
© Animation Inc. All rights reserved.
© Animation Inc. All rights reserved.