import random
import time
import json
class PersuasionMirror:
    """
    A 'Critical Artifact' simulating an Affective Computing tool for neuromarketing.
This tool is a 'deliberate illusion' built to expose the
epistemological flaws and ethical hazards of this technology.
It does not measure 'emotion'; it simulates the measurement of
muscular contractions and applies an arbitrary, reductionist
logic to them.
"""
def __init__(self, model_framework="Ekman-Basic-Emotions-v1.3"):
"""
PHILOSOPHICAL NOTE (Imported Epistemology):
The tool is initialized with a specific, contested model
(e.g., a seven-label variant of Paul Ekman's "basic emotions").
This highlights how a Western, cultural framework is being
enforced as a universal "truth" by the machine.
"""
self.model_framework = model_framework
self.known_emotions = ['happy', 'sad', 'angry', 'neutral', 'surprise', 'disgust', 'fear']
print(f"Initializing 'Persuasion Mirror' with framework: {self.model_framework}")
print(f"Tool will only 'see' the world through these labels: {self.known_emotions}\n")
def _analyze_simulated_frame(self, timestamp):
"""
--- THE EPISTEMOLOGICAL FLAW ---
This function *is* the flaw. It does not "see" joy or sadness.
It simulates a CV model's output, which is just a list of
probabilities matching pixel data (muscular contractions)
to a pre-defined label.
The "Average Face" Bias: We simulate bias by making "angry"
and "neutral" more likely, reflecting a model poorly trained
on diverse resting faces.
"""
# Simulate model bias (e.g., higher resting 'anger' probability)
probabilities = {emotion: random.uniform(0.0, 0.1) for emotion in self.known_emotions}
probabilities['neutral'] = random.uniform(0.2, 0.8) # High base probability
probabilities['happy'] = random.uniform(0.0, 0.3)
probabilities['angry'] = random.uniform(0.0, 0.2) # Biased "angry" reading
# Normalize probabilities so they sum to 1.0 (like a real softmax output)
total_p = sum(probabilities.values())
normalized_probabilities = {emotion: p / total_p for emotion, p in probabilities.items()}
# The tool's "truth": a single, reductionist label.
dominant_emotion = max(normalized_probabilities, key=normalized_probabilities.get)
return {
"timestamp": timestamp,
"dominant_emotion_detected": dominant_emotion,
"full_probability_vector": normalized_probabilities
}
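# Illustrative sketch of one frame's return value (the values are random and
# differ on every run; this particular vector is hypothetical, shown only to
# make the structure concrete; the probabilities sum to 1.0):
#
#   {
#       "timestamp": "3s",
#       "dominant_emotion_detected": "neutral",
#       "full_probability_vector": {
#           "happy": 0.11, "sad": 0.04, "angry": 0.09, "neutral": 0.52,
#           "surprise": 0.06, "disgust": 0.08, "fear": 0.10
#       }
#   }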
def _calculate_persuasion_score(self, emotional_arc):
"""
--- THE ETHICAL HAZARD (Manipulation vs. Persuasion) ---
This function represents the "vulnerability scanner."
An arbitrary, secret formula flattens the complex
"emotional arc" into a single, dangerous metric:
a playbook for locating and exploiting emotional vulnerability.
This is not persuasion; it is optimization for a desired trigger.
"""
if not emotional_arc:
return 0
total_frames = len(emotional_arc)
# The 'secret sauce' formula, defined by the marketer.
# It values 'happy' and 'surprise' (engagement)
# and heavily penalizes 'disgust' and 'anger' (rejection).
# 'Neutral' is ignored.
avg_happy = sum(frame['full_probability_vector']['happy'] for frame in emotional_arc) / total_frames
avg_surprise = sum(frame['full_probability_vector']['surprise'] for frame in emotional_arc) / total_frames
avg_disgust = sum(frame['full_probability_vector']['disgust'] for frame in emotional_arc) / total_frames
avg_angry = sum(frame['full_probability_vector']['angry'] for frame in emotional_arc) / total_frames
# The arbitrary calculation:
raw_score = (avg_happy * 4.0) + (avg_surprise * 2.5) - (avg_disgust * 5.0) - (avg_angry * 3.0)
# Scale and clamp the score to a "clean" 0-100 percentage.
# This hides the arbitrary weights and presents the number as
# an objective, final "truth."
scaled_score = max(0, min(100, (raw_score + 1) * 50))
return round(scaled_score, 2)
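# Worked example of the scoring arithmetic (the averages below are
# hypothetical, chosen only to illustrate how the weights behave):
#   avg_happy=0.20, avg_surprise=0.10, avg_disgust=0.05, avg_angry=0.10
#   raw_score = (0.20*4.0) + (0.10*2.5) - (0.05*5.0) - (0.10*3.0) = 0.50
#   scaled    = (0.50 + 1) * 50 = 75.0  ->  reported as 75.0 / 100
# Changing a single weight changes the "truth" the tool reports.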
def run_analysis_simulation(self, content_duration_sec=15):
"""
--- THE ETHICAL HAZARD (The "Emotional Panopticon") ---
This function is the "gaze" of the machine.
The user is now "on," and their simulated responses are being
watched, judged, and scored, second by second.
This creates the chilling effect of self-policing emotions.
"""
print(f"[INFO] Analysis starting for {content_duration_sec}-second marketing video.")
print(" The 'Emotional Panopticon' is now active.")
print(" Simulating real-time facial expression analysis...\n")
emotional_arc = []
for second in range(content_duration_sec):
# Simulate processing a frame of video each second
analysis_data = self._analyze_simulated_frame(timestamp=f"{second}s")
emotional_arc.append(analysis_data)
print(f" [T={analysis_data['timestamp']}] "
f"Dominant Emotion Detected: **{analysis_data['dominant_emotion_detected']}** "
f"(Confidence: {analysis_data['full_probability_vector'][analysis_data['dominant_emotion_detected']]:.1%})")
time.sleep(0.25) # Simulate processing time
print("\n[INFO] Content analysis complete.")
print(" Calculating final 'Persuasion Score'...")
persuasion_score = self._calculate_persuasion_score(emotional_arc)
print("\n" + "="*50)
print(" 'PERSUASION MIRROR' - FINAL REPORT")
print("="*50)
print(f"\n## π Final 'Persuasion Score' ##")
print(f" {persuasion_score} / 100")
print("\n## π 'Emotional Arc' (Data Log) ##")
print("--- THE ETHICAL HAZARD (Data Sovereignty) ---")
print("Who 'owns' this data? It is now a permanent, un-erasable")
print("record of an intimate response, captured and stored.")
# Print a sample of the data log
print(json.dumps(emotional_arc[:2], indent=2))
if len(emotional_arc) > 2:
print(f" ...and {len(emotional_arc) - 2} more data points logged.")
print("="*50)
return {
"final_persuasion_score": persuasion_score,
"emotional_arc_data_log": emotional_arc
}
if name == "main": print("="*60) print(" Project: The 'Persuasion Mirror' (A Critical Artifact)") print(" Purpose: To expose the flaws of neuromarketing, not perfect it.") print("="*60) print("This script is a 'deliberate illusion.' It does not 'see' you.") print("It simulates a biased, reductionist AI model to provoke") print("debate on its epistemological and ethical dangers.") print("\nPress Enter to begin the simulated neuromarketing analysis...") input()
# Initialize the "Mirror"
mirror = PersuasionMirror()
# Run the simulation
results = mirror.run_analysis_simulation(content_duration_sec=10)
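# A minimal sketch of how such a log could be persisted, underscoring the
# "Data Sovereignty" hazard discussed above (the file name below is
# hypothetical, not part of the original design):
#
#   with open("emotional_arc_log.json", "w") as f:
#       json.dump(results["emotional_arc_data_log"], f, indent=2)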
print("\n--- CONCLUSION: A Provocation ---")
print("This tool did not 'measure' anything real.")
print("It generated random numbers, filtered them through a")
print("biased framework, and applied an arbitrary score.")
print("\nThe danger is what happens when we believe this output is 'truth.'")