From 3ae64e8c1b025fa555ac3eb59ff8f70ae5773658 Mon Sep 17 00:00:00 2001 From: nusquama Date: Sun, 15 Mar 2026 12:01:28 +0800 Subject: [PATCH] creation --- ..._manipulation_risks_with_gpt-4o_agents_and_google_sheets.json | 1 + 1 file changed, 1 insertion(+) create mode 100644 workflows/Detect misinformation and manipulation risks with GPT-4o agents and Google Sheets-14002/detect_misinformation_and_manipulation_risks_with_gpt-4o_agents_and_google_sheets.json diff --git a/workflows/Detect misinformation and manipulation risks with GPT-4o agents and Google Sheets-14002/detect_misinformation_and_manipulation_risks_with_gpt-4o_agents_and_google_sheets.json b/workflows/Detect misinformation and manipulation risks with GPT-4o agents and Google Sheets-14002/detect_misinformation_and_manipulation_risks_with_gpt-4o_agents_and_google_sheets.json new file mode 100644 index 000000000..a503428fc --- /dev/null +++ b/workflows/Detect misinformation and manipulation risks with GPT-4o agents and Google Sheets-14002/detect_misinformation_and_manipulation_risks_with_gpt-4o_agents_and_google_sheets.json @@ -0,0 +1 @@ +{"id":"WJhmY7VJP3LtBlVa","meta":{"instanceId":"b91e510ebae4127f953fd2f5f8d40d58ca1e71c746d4500c12ae86aad04c1502"},"name":"Detect misinformation and manipulation risks using multi-agent AI analysis","tags":[],"nodes":[{"id":"dd278a65-b380-4aac-a3c5-64bb0d69b42e","name":"Start Analysis","type":"n8n-nodes-base.manualTrigger","position":[272,304],"parameters":{},"typeVersion":1},{"id":"85539064-fcb4-41d9-a331-666708126fcb","name":"Misinformation Detection Supervisor","type":"@n8n/n8n-nodes-langchain.agent","position":[992,304],"parameters":{"text":"={{ $json.posts }}","options":{"maxIterations":5,"systemMessage":"You are a Misinformation Detection Supervisor coordinating specialized analysis agents. Your role is to orchestrate the Narrative Pattern Detector Agent and Bot Behavior Analyzer Agent to comprehensively analyze social media post datasets. Delegate narrative clustering and manipulation detection to the Narrative Pattern Detector, and propagation cascade modeling and bot signature analysis to the Bot Behavior Analyzer. 
Synthesize their findings into a unified discourse risk assessment with actionable insights."},"promptType":"define","hasOutputParser":true},"typeVersion":3.1},{"id":"32fa76db-e2e4-402c-8b5a-b989561c162e","name":"Supervisor Model","type":"@n8n/n8n-nodes-langchain.lmChatOpenAi","position":[368,688],"parameters":{"model":{"__rl":true,"mode":"id","value":"gpt-4o"},"options":{"temperature":0.3},"builtInTools":{}},"credentials":{"openAiApi":{"id":"mv2ECvRtbAK63G2g","name":"OpenAi account"}},"typeVersion":1.3},{"id":"45538084-cb5c-49b5-a3f2-71eeda8f4a42","name":"Risk Assessment Output Parser","type":"@n8n/n8n-nodes-langchain.outputParserStructured","position":[1776,608],"parameters":{"jsonSchemaExample":"{\"type\":\"object\",\"properties\":{\"overall_risk_level\":{\"type\":\"string\",\"enum\":[\"critical\",\"high\",\"moderate\",\"low\"]},\"narrative_analysis\":{\"type\":\"object\",\"properties\":{\"coordinated_narratives\":{\"type\":\"array\",\"items\":{\"type\":\"object\",\"properties\":{\"cluster_id\":{\"type\":\"string\"},\"theme\":{\"type\":\"string\"},\"manipulation_techniques\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},\"post_count\":{\"type\":\"number\"},\"semantic_coherence\":{\"type\":\"number\"}}}},\"manipulation_score\":{\"type\":\"number\"}}},\"bot_analysis\":{\"type\":\"object\",\"properties\":{\"bot_signatures\":{\"type\":\"array\",\"items\":{\"type\":\"object\",\"properties\":{\"account_id\":{\"type\":\"string\"},\"bot_probability\":{\"type\":\"number\"},\"behavioral_patterns\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},\"temporal_anomalies\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}}}}},\"propagation_cascades\":{\"type\":\"array\",\"items\":{\"type\":\"object\",\"properties\":{\"cascade_id\":{\"type\":\"string\"},\"origin_post\":{\"type\":\"string\"},\"spread_velocity\":{\"type\":\"number\"},\"network_depth\":{\"type\":\"number\"}}}}}},\"discourse_risk_heatmap\":{\"type\":\"object\",\"properties\":{\"high_risk_topics\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},\"attention_weights\":{\"type\":\"object\"},\"explainability\":{\"type\":\"string\"}}},\"recommendations\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}}}}"},"typeVersion":1.3},{"id":"1934aeb8-6a0c-4c19-b7ce-ccb05eca0a19","name":"Narrative Pattern Detector Agent","type":"@n8n/n8n-nodes-langchain.agentTool","position":[560,592],"parameters":{"text":"={{ $fromAI('post_dataset', 'Social media post dataset to analyze for narrative patterns') }}","options":{"systemMessage":"You are a Narrative Pattern Detector specializing in identifying coordinated information campaigns and manipulation techniques in social media data. Analyze post datasets using semantic clustering to identify: 1) Coordinated narratives (groups of posts sharing similar themes/messaging), 2) Manipulation techniques (emotional appeals, false equivalence, strawman arguments, etc.), 3) Narrative coherence and coordination indicators. Use the semantic clustering tool to process posts and return detailed cluster analysis with manipulation technique identification."},"toolDescription":"Performs semantic clustering of social media post datasets to detect coordinated narratives, manipulation techniques, and information operation patterns. 
Returns narrative clusters with thematic analysis and manipulation scores."},"typeVersion":3},{"id":"5c06d649-2d93-4787-8730-e8a3df42b878","name":"Narrative Detector Model","type":"@n8n/n8n-nodes-langchain.lmChatOpenAi","position":[464,800],"parameters":{"model":{"__rl":true,"mode":"id","value":"gpt-4o"},"options":{"temperature":0.2},"builtInTools":{}},"credentials":{"openAiApi":{"id":"mv2ECvRtbAK63G2g","name":"OpenAi account"}},"typeVersion":1.3},{"id":"1311467e-e820-4b1b-b64b-fc97aba5d5b2","name":"Bot Behavior Analyzer Agent","type":"@n8n/n8n-nodes-langchain.agentTool","position":[1008,640],"parameters":{"text":"={{ $fromAI('activity_dataset', 'User activity and propagation data to analyze for bot behavior') }}","options":{"systemMessage":"You are a Bot Behavior Analyzer specializing in detecting automated and coordinated inauthentic behavior in social media networks. Analyze user activity patterns to identify: 1) Bot-like behavioral signatures (posting frequency anomalies, temporal patterns, content repetition), 2) Propagation cascades (how content spreads through networks), 3) Temporal activity anomalies (coordinated timing, burst patterns). Use the propagation analysis and temporal pattern detection tools to model cascades and classify bot behavior. Generate a discourse risk heatmap with attention-based explainability showing which features contributed to bot classification."},"toolDescription":"Models propagation cascades and classifies bot-like behavioral signatures through temporal activity analysis. Produces discourse risk heatmap with explainable attention-based reasoning. Returns bot probability scores, behavioral patterns, and propagation metrics."},"typeVersion":3},{"id":"2c978032-e589-4cca-8075-509a72edfbaf","name":"Bot Analyzer Model","type":"@n8n/n8n-nodes-langchain.lmChatOpenAi","position":[944,752],"parameters":{"model":{"__rl":true,"mode":"id","value":"gpt-4o"},"options":{"temperature":0.2},"builtInTools":{}},"credentials":{"openAiApi":{"id":"mv2ECvRtbAK63G2g","name":"OpenAi account"}},"typeVersion":1.3},{"id":"dc1344e8-2405-4b26-b840-141aac02eca9","name":"Semantic Clustering Tool","type":"@n8n/n8n-nodes-langchain.toolCode","position":[640,800],"parameters":{"language":"python","pythonCode":"import json\nfrom collections import defaultdict\nimport re\nimport math\n\ndef tokenize(text):\n return re.findall(r\"\\w+\", text.lower())\n\ndef compute_tfidf(posts):\n # Compute term frequency\n tf = []\n doc_freq = defaultdict(int)\n \n for post in posts:\n tokens = tokenize(post.get(\"content\", \"\"))\n term_count = defaultdict(int)\n for token in tokens:\n term_count[token] += 1\n doc_freq[token] += 1\n tf.append(term_count)\n \n # Compute TF-IDF\n n_docs = len(posts)\n tfidf_vectors = []\n \n for term_count in tf:\n tfidf = {}\n for term, count in term_count.items():\n tf_score = count / sum(term_count.values())\n idf_score = math.log(n_docs / doc_freq[term])\n tfidf[term] = tf_score * idf_score\n tfidf_vectors.append(tfidf)\n \n return tfidf_vectors\n\ndef cosine_similarity(vec1, vec2):\n all_terms = set(vec1.keys()) | set(vec2.keys())\n dot_product = sum(vec1.get(term, 0) * vec2.get(term, 0) for term in all_terms)\n mag1 = math.sqrt(sum(v**2 for v in vec1.values()))\n mag2 = math.sqrt(sum(v**2 for v in vec2.values()))\n if mag1 == 0 or mag2 == 0:\n return 0\n return dot_product / (mag1 * mag2)\n\ndef cluster_posts(posts, threshold=0.3):\n tfidf_vectors = compute_tfidf(posts)\n clusters = []\n assigned = [False] * len(posts)\n \n for i in range(len(posts)):\n if 
assigned[i]:\n continue\n \n cluster = [i]\n assigned[i] = True\n \n for j in range(i + 1, len(posts)):\n if assigned[j]:\n continue\n \n similarity = cosine_similarity(tfidf_vectors[i], tfidf_vectors[j])\n if similarity >= threshold:\n cluster.append(j)\n assigned[j] = True\n \n clusters.append(cluster)\n \n return clusters\n\ndef extract_theme(posts, indices, tfidf_vectors):\n # Aggregate TF-IDF scores across cluster\n theme_scores = defaultdict(float)\n for idx in indices:\n for term, score in tfidf_vectors[idx].items():\n theme_scores[term] += score\n \n # Get top terms\n top_terms = sorted(theme_scores.items(), key=lambda x: x[1], reverse=True)[:5]\n return \" \".join([term for term, _ in top_terms])\n\n# Main execution\nposts_data = $fromAI(\"posts\", \"Array of social media posts with content field\", \"object\")\n\nif isinstance(posts_data, str):\n posts_data = json.loads(posts_data)\n\nif not isinstance(posts_data, list):\n posts_data = [posts_data]\n\ntfidf_vectors = compute_tfidf(posts_data)\nclusters = cluster_posts(posts_data, threshold=0.3)\n\nresults = []\nfor cluster_id, indices in enumerate(clusters):\n if len(indices) >= 2: # Only report clusters with 2+ posts\n theme = extract_theme(posts_data, indices, tfidf_vectors)\n \n # Calculate coherence (avg pairwise similarity)\n similarities = []\n for i in range(len(indices)):\n for j in range(i + 1, len(indices)):\n sim = cosine_similarity(tfidf_vectors[indices[i]], tfidf_vectors[indices[j]])\n similarities.append(sim)\n \n coherence = sum(similarities) / len(similarities) if similarities else 0\n \n results.append({\n \"cluster_id\": f\"cluster_{cluster_id}\",\n \"theme\": theme,\n \"post_count\": len(indices),\n \"semantic_coherence\": round(coherence, 3),\n \"post_indices\": indices\n })\n\nreturn json.dumps({\"clusters\": results, \"total_clusters\": len(results)})","description":"Performs semantic clustering on social media posts to identify coordinated narratives and thematic groups. Uses TF-IDF vectorization and cosine similarity to cluster posts by semantic content. 
Returns cluster assignments, themes, and coherence scores."},"typeVersion":1.3},{"id":"0b649455-5667-4b2c-b900-ccbbc082b887","name":"Propagation Cascade Analyzer","type":"@n8n/n8n-nodes-langchain.toolCode","position":[1040,848],"parameters":{"language":"python","pythonCode":"import json\nfrom collections import defaultdict, deque\nfrom datetime import datetime\n\ndef parse_timestamp(ts):\n try:\n if isinstance(ts, str):\n return datetime.fromisoformat(ts.replace(\"Z\", \"+00:00\"))\n return ts\n except:\n return datetime.now()\n\ndef build_propagation_graph(posts):\n # Build graph of repost relationships\n graph = defaultdict(list)\n post_times = {}\n \n for post in posts:\n post_id = post.get(\"id\", post.get(\"post_id\"))\n parent_id = post.get(\"repost_of\", post.get(\"parent_id\"))\n timestamp = parse_timestamp(post.get(\"timestamp\", post.get(\"created_at\")))\n \n post_times[post_id] = timestamp\n \n if parent_id:\n graph[parent_id].append(post_id)\n \n return graph, post_times\n\ndef analyze_cascade(origin_id, graph, post_times):\n # BFS to measure cascade depth and velocity\n queue = deque([(origin_id, 0)]) # (post_id, depth)\n visited = {origin_id}\n max_depth = 0\n cascade_posts = [origin_id]\n \n while queue:\n current_id, depth = queue.popleft()\n max_depth = max(max_depth, depth)\n \n for child_id in graph.get(current_id, []):\n if child_id not in visited:\n visited.add(child_id)\n cascade_posts.append(child_id)\n queue.append((child_id, depth + 1))\n \n # Calculate spread velocity (posts per hour)\n if len(cascade_posts) > 1:\n timestamps = [post_times[pid] for pid in cascade_posts if pid in post_times]\n if len(timestamps) > 1:\n time_span = (max(timestamps) - min(timestamps)).total_seconds() / 3600 # hours\n velocity = len(cascade_posts) / max(time_span, 0.1)\n else:\n velocity = 0\n else:\n velocity = 0\n \n return {\n \"cascade_id\": f\"cascade_{origin_id}\",\n \"origin_post\": origin_id,\n \"total_posts\": len(cascade_posts),\n \"network_depth\": max_depth,\n \"spread_velocity\": round(velocity, 2)\n }\n\n# Main execution\nactivity_data = $fromAI(\"activity_data\", \"Array of posts with repost/share relationships and timestamps\", \"object\")\n\nif isinstance(activity_data, str):\n activity_data = json.loads(activity_data)\n\nif not isinstance(activity_data, list):\n activity_data = [activity_data]\n\ngraph, post_times = build_propagation_graph(activity_data)\n\n# Find origin posts (posts with no parent)\norigin_posts = []\nfor post in activity_data:\n post_id = post.get(\"id\", post.get(\"post_id\"))\n parent_id = post.get(\"repost_of\", post.get(\"parent_id\"))\n if not parent_id and post_id in graph: # Has children\n origin_posts.append(post_id)\n\n# Analyze each cascade\ncascades = []\nfor origin_id in origin_posts:\n cascade = analyze_cascade(origin_id, graph, post_times)\n if cascade[\"total_posts\"] >= 3: # Only report significant cascades\n cascades.append(cascade)\n\n# Sort by velocity (most viral first)\ncascades.sort(key=lambda x: x[\"spread_velocity\"], reverse=True)\n\nreturn json.dumps({\"cascades\": cascades[:10], \"total_cascades\": len(cascades)})","description":"Models how content propagates through social networks by analyzing repost/share chains. Calculates spread velocity, network depth, and cascade patterns. 
Returns cascade metrics including origin posts, propagation speed, and network reach."},"typeVersion":1.3},{"id":"3c41062b-6247-4243-9fe6-3440fda6b63a","name":"Temporal Pattern Detector","type":"@n8n/n8n-nodes-langchain.toolCode","position":[1168,848],"parameters":{"language":"python","pythonCode":"import json\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta\nimport math\n\ndef parse_timestamp(ts):\n try:\n if isinstance(ts, str):\n return datetime.fromisoformat(ts.replace(\"Z\", \"+00:00\"))\n return ts\n except:\n return datetime.now()\n\ndef analyze_temporal_patterns(user_posts):\n if len(user_posts) < 3:\n return None\n \n timestamps = sorted([parse_timestamp(p.get(\"timestamp\", p.get(\"created_at\"))) for p in user_posts])\n \n # Calculate inter-post intervals\n intervals = [(timestamps[i+1] - timestamps[i]).total_seconds() for i in range(len(timestamps)-1)]\n \n if not intervals:\n return None\n \n # Metrics for bot detection\n avg_interval = sum(intervals) / len(intervals)\n \n # Variance in intervals (bots have low variance)\n variance = sum((x - avg_interval)**2 for x in intervals) / len(intervals)\n std_dev = math.sqrt(variance)\n coefficient_of_variation = std_dev / avg_interval if avg_interval > 0 else 0\n \n # Detect burst patterns (many posts in short time)\n burst_threshold = 300 # 5 minutes\n bursts = sum(1 for interval in intervals if interval < burst_threshold)\n burst_ratio = bursts / len(intervals)\n \n # Detect regular intervals (bot-like)\n regular_intervals = sum(1 for interval in intervals if abs(interval - avg_interval) < avg_interval * 0.1)\n regularity_score = regular_intervals / len(intervals)\n \n # Hour-of-day distribution (bots post 24/7)\n hours = [ts.hour for ts in timestamps]\n unique_hours = len(set(hours))\n hour_diversity = unique_hours / 24\n \n # Bot probability calculation\n bot_score = 0\n behavioral_patterns = []\n temporal_anomalies = []\n \n # Low variance = bot-like\n if coefficient_of_variation < 0.3:\n bot_score += 0.25\n behavioral_patterns.append(\"low_interval_variance\")\n temporal_anomalies.append(f\"Consistent posting intervals (CV: {coefficient_of_variation:.2f})\")\n \n # High burst activity\n if burst_ratio > 0.5:\n bot_score += 0.25\n behavioral_patterns.append(\"burst_posting\")\n temporal_anomalies.append(f\"High burst activity ({burst_ratio*100:.0f}% of posts in bursts)\")\n \n # Regular intervals\n if regularity_score > 0.6:\n bot_score += 0.25\n behavioral_patterns.append(\"regular_intervals\")\n temporal_anomalies.append(f\"Highly regular posting pattern ({regularity_score*100:.0f}% regularity)\")\n \n # 24/7 posting\n if hour_diversity > 0.7:\n bot_score += 0.25\n behavioral_patterns.append(\"24_7_activity\")\n temporal_anomalies.append(f\"Posts across {unique_hours} different hours (24/7 pattern)\")\n \n # High frequency (guard against a zero time span when all timestamps match)\n active_hours = max((timestamps[-1] - timestamps[0]).total_seconds() / 3600, 0.1)\n posts_per_hour = len(user_posts) / active_hours\n if posts_per_hour > 5:\n bot_score += 0.15\n behavioral_patterns.append(\"high_frequency\")\n temporal_anomalies.append(f\"High posting rate ({posts_per_hour:.1f} posts/hour)\")\n \n bot_score = min(bot_score, 1.0)\n \n return {\n \"bot_probability\": round(bot_score, 3),\n \"behavioral_patterns\": behavioral_patterns,\n \"temporal_anomalies\": temporal_anomalies,\n \"metrics\": {\n \"avg_interval_seconds\": round(avg_interval, 1),\n \"coefficient_of_variation\": round(coefficient_of_variation, 3),\n \"burst_ratio\": round(burst_ratio, 3),\n \"posts_per_hour\": round(posts_per_hour, 
2)\n }\n }\n\n# Main execution\nactivity_data = $fromAI(\"user_activity\", \"Array of user posts with timestamps and user IDs\", \"object\")\n\nif isinstance(activity_data, str):\n activity_data = json.loads(activity_data)\n\nif not isinstance(activity_data, list):\n activity_data = [activity_data]\n\n# Group posts by user\nuser_posts = defaultdict(list)\nfor post in activity_data:\n user_id = post.get(\"user_id\", post.get(\"author_id\", \"unknown\"))\n user_posts[user_id].append(post)\n\n# Analyze each user\nbot_signatures = []\nfor user_id, posts in user_posts.items():\n analysis = analyze_temporal_patterns(posts)\n if analysis and analysis[\"bot_probability\"] > 0.3: # Only report suspicious accounts\n bot_signatures.append({\n \"account_id\": user_id,\n **analysis\n })\n\n# Sort by bot probability\nbot_signatures.sort(key=lambda x: x[\"bot_probability\"], reverse=True)\n\nreturn json.dumps({\"bot_signatures\": bot_signatures[:20], \"total_analyzed\": len(user_posts)})","description":"Detects bot-like behavioral signatures through temporal activity analysis. Identifies posting frequency anomalies, burst patterns, coordinated timing, and suspicious activity rhythms. Returns bot probability scores and behavioral pattern classifications."},"typeVersion":1.3},{"id":"ec2f3afc-f72a-4e1a-907b-600335cc6e3a","name":"Risk Heatmap Generator","type":"@n8n/n8n-nodes-langchain.toolCode","position":[1312,848],"parameters":{"language":"python","pythonCode":"import json\nfrom collections import defaultdict\n\ndef generate_risk_heatmap(narrative_data, bot_data):\n # Parse inputs\n if isinstance(narrative_data, str):\n narrative_data = json.loads(narrative_data)\n if isinstance(bot_data, str):\n bot_data = json.loads(bot_data)\n \n # Extract high-risk topics from narrative clusters\n high_risk_topics = []\n topic_attention = {}\n \n if \"clusters\" in narrative_data:\n for cluster in narrative_data[\"clusters\"]:\n theme = cluster.get(\"theme\", \"\")\n coherence = cluster.get(\"semantic_coherence\", 0)\n post_count = cluster.get(\"post_count\", 0)\n \n # Risk score based on coherence (coordination) and volume\n risk_score = (coherence * 0.6) + (min(post_count / 50, 1.0) * 0.4)\n \n if risk_score > 0.5:\n high_risk_topics.append(theme)\n topic_attention[theme] = {\n \"risk_score\": round(risk_score, 3),\n \"coherence_weight\": round(coherence * 0.6, 3),\n \"volume_weight\": round(min(post_count / 50, 1.0) * 0.4, 3),\n \"post_count\": post_count\n }\n \n # Extract high-risk accounts from bot signatures\n high_risk_accounts = []\n account_attention = {}\n \n if \"bot_signatures\" in bot_data:\n for signature in bot_data[\"bot_signatures\"]:\n account_id = signature.get(\"account_id\", \"\")\n bot_prob = signature.get(\"bot_probability\", 0)\n patterns = signature.get(\"behavioral_patterns\", [])\n \n if bot_prob > 0.5:\n high_risk_accounts.append(account_id)\n \n # Attention weights for explainability\n pattern_weights = {}\n for pattern in patterns:\n if pattern == \"low_interval_variance\":\n pattern_weights[pattern] = 0.25\n elif pattern == \"burst_posting\":\n pattern_weights[pattern] = 0.25\n elif pattern == \"regular_intervals\":\n pattern_weights[pattern] = 0.25\n elif pattern == \"24_7_activity\":\n pattern_weights[pattern] = 0.25\n elif pattern == \"high_frequency\":\n pattern_weights[pattern] = 0.15\n \n account_attention[account_id] = {\n \"bot_probability\": bot_prob,\n \"pattern_weights\": pattern_weights,\n \"total_patterns\": len(patterns)\n }\n \n # Generate explainability summary\n 
explainability = []\n \n if high_risk_topics:\n explainability.append(f\"Identified {len(high_risk_topics)} coordinated narrative clusters with high semantic coherence, indicating potential information operations.\")\n \n if high_risk_accounts:\n explainability.append(f\"Detected {len(high_risk_accounts)} accounts with bot-like behavioral signatures based on temporal activity patterns.\")\n \n # Attention-based reasoning\n explainability.append(\"Risk assessment uses attention weights: narrative coherence (60%), post volume (40%), and bot behavioral patterns (temporal regularity, burst activity, 24/7 posting).\")\n \n return {\n \"high_risk_topics\": high_risk_topics[:10],\n \"high_risk_accounts\": high_risk_accounts[:20],\n \"attention_weights\": {\n \"topics\": topic_attention,\n \"accounts\": account_attention\n },\n \"explainability\": \" \".join(explainability),\n \"summary\": {\n \"total_risk_topics\": len(high_risk_topics),\n \"total_risk_accounts\": len(high_risk_accounts)\n }\n }\n\n# Main execution\nnarrative_results = $fromAI(\"narrative_analysis\", \"Results from narrative pattern detection including clusters\", \"object\")\nbot_results = $fromAI(\"bot_analysis\", \"Results from bot behavior analysis including signatures\", \"object\")\n\nheatmap = generate_risk_heatmap(narrative_results, bot_results)\n\nreturn json.dumps(heatmap)","description":"Generates a discourse risk heatmap with explainable attention-based reasoning. Combines narrative analysis and bot behavior data to identify high-risk topics and accounts. Returns heatmap with attention weights showing which features contributed to risk assessment."},"typeVersion":1.3},{"id":"0b4acab1-0062-4d6f-acd2-a42544cc1dc4","name":"Format Results","type":"n8n-nodes-base.set","position":[1856,240],"parameters":{"options":{},"assignments":{"assignments":[{"id":"id-1","name":"timestamp","type":"string","value":"={{ $now.toISO() }}"},{"id":"id-2","name":"overall_risk_level","type":"string","value":"={{ $json.output.overall_risk_level }}"},{"id":"id-3","name":"coordinated_narratives_count","type":"number","value":"={{ $json.output.narrative_analysis.coordinated_narratives.length }}"},{"id":"id-4","name":"bot_signatures_count","type":"number","value":"={{ $json.output.bot_analysis.bot_signatures.length }}"},{"id":"id-5","name":"high_risk_topics","type":"string","value":"={{ $json.output.discourse_risk_heatmap.high_risk_topics.join(', ') }}"},{"id":"id-6","name":"recommendations","type":"string","value":"={{ $json.output.recommendations.join(' | ') }}"},{"id":"id-7","name":"full_analysis","type":"string","value":"={{ JSON.stringify($json.output) }}"}]}},"typeVersion":3.4},{"id":"8bfc0c39-06bc-4ca0-8422-5a85dc40b10e","name":"Store Risk Assessment","type":"n8n-nodes-base.googleSheets","position":[2096,240],"parameters":{"operation":"append","sheetName":{"__rl":true,"mode":"list","value":""},"documentId":{"__rl":true,"mode":"list","value":""}},"credentials":{"googleSheetsOAuth2Api":{"id":"hQFe8XTqJEiHL03Z","name":"Google Sheets account"}},"typeVersion":4.7},{"id":"e9ff7af9-f5ae-4052-9251-423fe408e85d","name":"Manipulation Technique Classifier Agent","type":"@n8n/n8n-nodes-langchain.agentTool","position":[1488,608],"parameters":{"text":"={{ $fromAI('content', 'Social media post content to analyze for manipulation techniques') }}","options":{"systemMessage":"You are a Manipulation Technique Classifier specializing in identifying disinformation tactics and propaganda techniques in social media content. 
Analyze posts to detect: 1) Emotional manipulation (fear appeals, outrage, moral panic), 2) Logical fallacies (strawman, false equivalence, slippery slope, ad hominem), 3) Propaganda techniques (bandwagon, glittering generalities, name-calling, transfer), 4) Disinformation tactics (cherry-picking, misleading statistics, out-of-context quotes). Use the manipulation taxonomy tool to classify techniques and provide detailed explanations of how each technique is employed."},"toolDescription":"Classifies manipulation techniques used in social media posts including emotional appeals, logical fallacies, propaganda techniques, and disinformation tactics. Returns detailed taxonomy of manipulation methods detected in content."},"typeVersion":3},{"id":"7929c058-5979-4b2e-baf4-23504892dc52","name":"Manipulation Classifier Model","type":"@n8n/n8n-nodes-langchain.lmChatOpenAi","position":[1552,816],"parameters":{"model":{"__rl":true,"mode":"id","value":"gpt-4o"},"options":{"temperature":0.1},"builtInTools":{}},"credentials":{"openAiApi":{"id":"mv2ECvRtbAK63G2g","name":"OpenAi account"}},"typeVersion":1.3},{"id":"c76f01d3-a14f-4c69-a31b-b2ee4b463308","name":"Manipulation Taxonomy Tool","type":"@n8n/n8n-nodes-langchain.toolCode","position":[1696,816],"parameters":{"language":"python","pythonCode":"import json\nimport re\n\ndef classify_manipulation_techniques(text):\n if not text:\n return {\"techniques\": [], \"confidence\": 0}\n \n text_lower = text.lower()\n detected_techniques = []\n \n # Emotional manipulation patterns\n emotional_patterns = {\n \"fear_appeal\": [\"danger\", \"threat\", \"risk\", \"unsafe\", \"scary\", \"terrifying\", \"alarming\"],\n \"outrage_generation\": [\"outrageous\", \"unbelievable\", \"shocking\", \"disgusting\", \"appalling\"],\n \"moral_panic\": [\"destroying\", \"ruining\", \"corrupting\", \"threatening our way\"],\n \"emotional_appeal\": [\"heartbreaking\", \"tragic\", \"devastating\", \"touching\"]\n }\n \n for technique, keywords in emotional_patterns.items():\n if any(keyword in text_lower for keyword in keywords):\n detected_techniques.append({\n \"category\": \"emotional_manipulation\",\n \"technique\": technique,\n \"confidence\": 0.7,\n \"explanation\": f\"Uses {technique.replace('_', ' ')} to evoke strong emotional response\"\n })\n \n # Logical fallacy patterns\n fallacy_patterns = {\n \"strawman\": [\"they claim\", \"they say\", \"they want you to believe\"],\n \"false_equivalence\": [\"just as bad as\", \"no different than\", \"same as\"],\n \"slippery_slope\": [\"next thing\", \"before you know it\", \"lead to\", \"will result in\"],\n \"ad_hominem\": [\"hypocrite\", \"liar\", \"corrupt\", \"dishonest\"]\n }\n \n for technique, keywords in fallacy_patterns.items():\n if any(keyword in text_lower for keyword in keywords):\n detected_techniques.append({\n \"category\": \"logical_fallacy\",\n \"technique\": technique,\n \"confidence\": 0.6,\n \"explanation\": f\"Employs {technique.replace('_', ' ')} fallacy to mislead reasoning\"\n })\n \n # Propaganda techniques\n propaganda_patterns = {\n \"bandwagon\": [\"everyone\", \"everybody\", \"most people\", \"join us\", \"be part of\"],\n \"glittering_generalities\": [\"freedom\", \"liberty\", \"justice\", \"truth\", \"patriot\"],\n \"name_calling\": [\"elite\", \"establishment\", \"mainstream\", \"radical\"],\n \"transfer\": [\"our values\", \"our heritage\", \"our tradition\"]\n }\n \n for technique, keywords in propaganda_patterns.items():\n if any(keyword in text_lower for keyword in keywords):\n 
detected_techniques.append({\n \"category\": \"propaganda\",\n \"technique\": technique,\n \"confidence\": 0.65,\n \"explanation\": f\"Uses {technique.replace('_', ' ')} propaganda technique\"\n })\n \n # Disinformation tactics\n disinfo_patterns = {\n \"cherry_picking\": [\"studies show\", \"research proves\", \"experts say\"],\n \"misleading_statistics\": [r\"\\d+%\", r\"\\d+ times\", \"increase of\"],\n \"out_of_context\": [\"quote\", \"said\", \"stated\", \"according to\"]\n }\n \n for technique, patterns in disinfo_patterns.items():\n for pattern in patterns:\n if re.search(pattern, text_lower):\n detected_techniques.append({\n \"category\": \"disinformation_tactic\",\n \"technique\": technique,\n \"confidence\": 0.5,\n \"explanation\": f\"Potential {technique.replace('_', ' ')} detected\"\n })\n break\n \n # Calculate overall manipulation score\n if detected_techniques:\n avg_confidence = sum(t[\"confidence\"] for t in detected_techniques) / len(detected_techniques)\n manipulation_score = min(len(detected_techniques) * 0.15, 1.0)\n else:\n avg_confidence = 0\n manipulation_score = 0\n \n return {\n \"techniques\": detected_techniques,\n \"total_techniques\": len(detected_techniques),\n \"manipulation_score\": round(manipulation_score, 3),\n \"avg_confidence\": round(avg_confidence, 3)\n }\n\n# Main execution\ncontent = $fromAI(\"text\", \"Text content to analyze for manipulation techniques\", \"string\")\n\nresult = classify_manipulation_techniques(content)\n\nreturn json.dumps(result)","description":"Provides a comprehensive taxonomy of manipulation techniques and propaganda methods. Returns structured classification of disinformation tactics with definitions and detection patterns."},"typeVersion":1.3},{"id":"810203f5-9e1d-40a5-bdb7-4e88817833b3","name":"Sticky Note","type":"n8n-nodes-base.stickyNote","position":[1264,-224],"parameters":{"color":5,"width":416,"height":384,"content":"## Prerequisites\n- OpenAI API credentials (GPT-4o access)\n- Google Sheets API credentials\n- n8n instance (v1.0+)\n- Access to propagation/temporal data APIs\n- Google account with target Sheet pre-created\n## Use Cases\n- Platform trust and safety teams flagging viral misinformation campaigns\n## Customisation\n- Replace Google Sheets with a database or SIEM output\n## Benefits\n- Parallel multi-agent analysis cuts manual review time significantly"},"typeVersion":1},{"id":"2b3650ee-17c2-4736-ad3f-8fa23e15687f","name":"Sticky Note1","type":"n8n-nodes-base.stickyNote","position":[896,-96],"parameters":{"width":320,"height":256,"content":"## Setup Steps\n1. Connect OpenAI credentials to Supervisor, Narrative, Bot, and Manipulation classifier model nodes.\n2. Configure Google Sheets credentials and set target spreadsheet ID in the Store Risk Assessment node.\n3. Optionally tune each model node's temperature and the supervisor's maxIterations (default 5) to match your analysis depth."},"typeVersion":1},{"id":"ad1562ad-766f-4b96-ba55-5c068b76fa67","name":"Sticky Note2","type":"n8n-nodes-base.stickyNote","position":[208,-112],"parameters":{"width":640,"height":288,"content":"## How It Works\nThis workflow automates misinformation and information manipulation detection using a coordinated multi-agent AI architecture. It is designed for trust and safety teams, media analysts, researchers, and platform moderators who need scalable, structured threat assessment. The pipeline begins when a manual trigger initiates content analysis. 
A central Misinformation Detection supervisor agent coordinates three specialised sub-agents: a Narrative Pattern Detector that identifies recurring disinformation themes via semantic clustering, a Bot Behaviour Analyser that detects coordinated inauthentic activity using propagation and temporal pattern tools, and a Manipulation Technique Classifier that maps content to known influence tactics using a keyword-based taxonomy tool. A Risk Heatmap Generator tool, attached to the Bot Behaviour Analyser, combines the narrative and bot findings into an explainable discourse risk heatmap. Each agent uses a dedicated GPT-4o model. Results are passed to a structured output parser, formatted for readability, and appended to Google Sheets for ongoing risk tracking and audit."},"typeVersion":1},{"id":"719e3828-1642-45ef-ac33-b5dc28ede110","name":"Sticky Note3","type":"n8n-nodes-base.stickyNote","position":[896,496],"parameters":{"color":7,"width":528,"height":544,"content":"## Bot Behaviour Analyser Agent\n**What:** Analyses propagation cascades and temporal patterns.\n**Why:** Detects coordinated inauthentic amplification.\n"},"typeVersion":1},{"id":"56e03198-80a9-4076-869b-c27d57d6b3c5","name":"Sticky Note4","type":"n8n-nodes-base.stickyNote","position":[192,480],"parameters":{"color":7,"width":688,"height":464,"content":"## Narrative Pattern Detector Agent\n**What:** Identifies disinformation narratives using semantic clustering.\n**Why:** Surfaces recurring themes missed by keyword filters.\n"},"typeVersion":1},{"id":"350c53a5-0524-409d-b76b-9cd1341b517d","name":"Sticky Note5","type":"n8n-nodes-base.stickyNote","position":[208,192],"parameters":{"color":7,"width":1216,"height":272,"content":"## Misinformation Detection Agent\n**What:** Supervises three sub-agents via tool routing.\n**Why:** Centralises orchestration for consistent, parallel analysis."},"typeVersion":1},{"id":"0b877e12-69b8-4621-b719-f76f5d05f466","name":"Sticky Note6","type":"n8n-nodes-base.stickyNote","position":[1440,400],"parameters":{"color":7,"width":560,"height":576,"content":"## Manipulation Technique Classifier Agent\n**What:** Maps content to manipulation tactics using a keyword taxonomy tool.\n**Why:** Enables structured, taxonomy-driven threat scoring."},"typeVersion":1},{"id":"234b11d9-fa6d-4788-86c3-89b54dd72133","name":"Sticky Note7","type":"n8n-nodes-base.stickyNote","position":[1776,128],"parameters":{"color":7,"width":528,"height":256,"content":"## Format Results & Store Risk Assessment\n**What:** Parses, formats, and appends findings to Google Sheets.\n**Why:** Ensures auditable, structured reporting."},"typeVersion":1}],"active":false,"pinData":{},"settings":{"binaryMode":"separate","availableInMCP":false,"executionOrder":"v1"},"versionId":"682a8920-4842-4bba-9a20-8c3fdcc7353b","connections":{"Format Results":{"main":[[{"node":"Store Risk Assessment","type":"main","index":0}]]},"Start Analysis":{"main":[[{"node":"Misinformation Detection Supervisor","type":"main","index":0}]]},"Supervisor Model":{"ai_languageModel":[[{"node":"Misinformation Detection Supervisor","type":"ai_languageModel","index":0}]]},"Bot Analyzer Model":{"ai_languageModel":[[{"node":"Bot Behavior Analyzer Agent","type":"ai_languageModel","index":0}]]},"Risk Heatmap Generator":{"ai_tool":[[{"node":"Bot Behavior Analyzer Agent","type":"ai_tool","index":0}]]},"Narrative Detector Model":{"ai_languageModel":[[{"node":"Narrative Pattern Detector Agent","type":"ai_languageModel","index":0}]]},"Semantic Clustering Tool":{"ai_tool":[[{"node":"Narrative Pattern Detector Agent","type":"ai_tool","index":0}]]},"Temporal Pattern Detector":{"ai_tool":[[{"node":"Bot Behavior Analyzer 
Agent","type":"ai_tool","index":0}]]},"Manipulation Taxonomy Tool":{"ai_tool":[[{"node":"Manipulation Technique Classifier Agent","type":"ai_tool","index":0}]]},"Bot Behavior Analyzer Agent":{"ai_tool":[[{"node":"Misinformation Detection Supervisor","type":"ai_tool","index":0}]]},"Propagation Cascade Analyzer":{"ai_tool":[[{"node":"Bot Behavior Analyzer Agent","type":"ai_tool","index":0}]]},"Manipulation Classifier Model":{"ai_languageModel":[[{"node":"Manipulation Technique Classifier Agent","type":"ai_languageModel","index":0}]]},"Risk Assessment Output Parser":{"ai_outputParser":[[{"node":"Misinformation Detection Supervisor","type":"ai_outputParser","index":0}]]},"Narrative Pattern Detector Agent":{"ai_tool":[[{"node":"Misinformation Detection Supervisor","type":"ai_tool","index":0}]]},"Misinformation Detection Supervisor":{"main":[[{"node":"Format Results","type":"main","index":0}]]},"Manipulation Technique Classifier Agent":{"ai_tool":[[{"node":"Misinformation Detection Supervisor","type":"ai_tool","index":0}]]}}} \ No newline at end of file