package FactStudio;
use JSON;
use Digest::MD5 qw(md5_hex);
use DateTime;
use DateTime::Format::Strptime;
use HTML::Entities;
use Encode;
use utf8;
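
# FactStudio: helper routines that turn Lead Stories fact checks into short
# vertical videos and episodes of "The Daily NOT" talk show.  The pipeline
# shells out to curl, wget, ffmpeg/ffprobe, yt-dlp and headless google-chrome,
# and calls the Google Text-to-Speech and OpenAI (ChatGPT / Whisper) APIs.
#
# Hypothetical usage from a driver script (tarantino.pl / stewart.pl are
# assumed to load this file and call into it roughly like this):
#
#   require "FactStudio.pm";                                 # assumed file name
#   FactStudio::build_talkshow("2024/04/01");                # full episode for a date
#   FactStudio::build_video_v2($factcheckurl, $pipvideourl); # single fact check video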


my $workingdir = "tmp/";
my $musicdir = "music/";
my $outputdir = "output/";
my $funnysounddir = "funnysounds/";
my $facesdir = "faces/";
my $imgdir = "images/";
my $phrasesdir = "phrases/";
my $cachedir = "cache/";
my $videodir = "video/";
my $basename = "final.mp4";


open(CHATTER, "<".$phrasesdir."chatter.txt") or die "Cannot open ".$phrasesdir."chatter.txt: $!";
chomp (my @chatter = <CHATTER>);
close CHATTER;

open(INTRODUCTIONS, "<".$phrasesdir."introductions.txt") or die "Cannot open ".$phrasesdir."introductions.txt: $!";
chomp (my @introductions = <INTRODUCTIONS>);
close INTRODUCTIONS;

open(REACTIONS, "<".$phrasesdir."reactions.txt") or die "Cannot open ".$phrasesdir."reactions.txt: $!";
chomp (my @reactions = <REACTIONS>);
close REACTIONS;

open(TRANSCRIPT, ">".$workingdir."transcript.txt") or die "Cannot open ".$workingdir."transcript.txt: $!";

sub factcheckingday {
	generate_audio("fcd1","Happy fact checking day from Artie Ficial and Botty McBotface, hosts of The Daily NOT!", "artie");
	generate_audio("fcd2","Even robots agree: stay vigilant against misinformation, especially when it is generated with artificial intelligence!", "botty");	
}

sub test_it {
	generate_audio('test','Hallo wereld, 1, 2, 3, 4, 5.  Test.','nederlands');
	generate_audio('testfast','Hallo wereld, 1, 2, 3, 4, 5.  Test.','nederlandsfast');
}

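# Print a small bash script to stdout: one "perl tarantino.pl <url> [<video>]"
# line per fact check published on $date (passing along the first embedded
# social media video, if any), followed by a stewart.pl invocation for the day.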
sub generate_commands {
	my $date = shift;
	my @links = get_factcheck_urls($date);
	print "#!/bin/bash\n";
	foreach my $link (@links){
		my @videos = get_video_links($link);
		print "perl tarantino.pl ".$link.($videos[0]?" '$videos[0]'":"").";\n";
	}
	print "perl stewart.pl $date \"\"\n";
}

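# Scrape a fact check page for embedded social media URLs (TikTok, Facebook,
# YouTube, Instagram, Twitter/X), ignoring tweet intents and everything after
# the article appendix.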
sub get_video_links{
	my $url = shift;
	my @links;
	my $html = `curl -s $url`;
	$html =~ s/mod-full-article-appendix.*$//gsi;
	while ($html =~ /\"(http[^\"]+?(tiktok\.com|facebook\.com|youtube\.com|instagram\.com|twitter\.com|x\.com)[^\"]+?)\"/gsi){
		my $link = $1;
		next if ($link =~ /\/intent\/tweet/);
		push @links, $link;
	}
	return @links;
}

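# Try to resolve an Instagram post/reel URL to a direct .mp4 URL via the
# ?__a=1&__d=dis JSON endpoint; any other URL is returned unchanged.
# (Assumption: that endpoint still returns JSON for these posts; Instagram has
# restricted it in the past, in which case the original URL is returned.)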
sub get_direct_ig_video_link{
	my $url = shift;
	print "URL: $url\n";
	if($url =~ /(https\:\/\/(www\.)?instagram\.com\/(p|reel)\/(.+?))(\/|$)/){
		my $jsonurl = $1.'/?__a=1&__d=dis';
		my $json = `curl "$jsonurl"`;
		print $json;
		if ($json =~ /\"url\"\:\"(https.+?\.mp4\?.+?)\"/si){
			return $1;
		}
		# No direct .mp4 link found, fall back to the original URL
		return $url;
	}
	else {
		return $url;
	}
}

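# Build a full "The Daily NOT" episode for one day of fact checks: preshow
# chatter, opening credits, a scrolling headlines teaser, one segment per
# fact check (with interstitials in between), closing credits, then
# concatenation, captions and a transcript via merge_talkshow().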
sub build_talkshow{
	my $date = shift;
	my %strings;
	my @strings;
	my %basename_by_title;
	my $headlines;
	my @links = get_factcheck_urls($date);
	foreach my $link (@links){
		my $strings = get_strings($link);
		push @strings, $strings;
		$basename_by_title{$strings->{title}} = get_basename($link);	
	}
	foreach my $factcheck (@strings){	
		my $title = $factcheck->{title};
		$title =~ s/Fact\sCheck\://gsi;
		$title =~ s/\s*\-\-.+?$//gsi;
		$headlines .= "$title.\n";
	}

	# Clean up working directory
	system("rm ".$workingdir."talkshow*.mp3");
	system("rm ".$workingdir."talkshow*.mp4");
	
	# Build intro chatter segment
	generate_preshow($date);
	
	
	# Build opening credits
	generate_audio("intro1","Hello and welcome to The Daily NOT! A show about what didn't happen today. My name is Artie Ficial and my jokes are mostly powered by ChatGPT.", "artie");
	generate_audio("intro2","And I'm Botty McBotface. Most of what I know comes from fact checks published by Lead Stories.", "botty");
	generate_audio("intro3","So, Botty, what do we have today?", "artie");
	generate_audio("intro4","Here's an overview of what didn't happen:", "botty");
	system("cat ".$workingdir."intro*.mp3 > ".$workingdir."talkshow01.mp3");
	overlay_sound($imgdir."TDN-on-air.png", $workingdir."talkshow01.mp3", $workingdir."intro.mp4");
	overlay_video($videodir."TheDailyNotOpening-newlogo.mp4", $workingdir."intro.mp4", $workingdir."talkshow01.mp4");
	mix_in_music($workingdir."talkshow01.mp4",$musicdir."newsreportmusic-6242.mp3","0.10");
	
	# Build headline teaser section
	generate_audio("talkshow02",$headlines, "bottyfast");
	overlay_sound($imgdir."TDN-on-air.png", $workingdir."talkshow02.mp3", $workingdir."headlines.mp4");
	# Get headlines screenshot
	get_screenshot_headlines($date);
	# Generate scroll video
	generate_scroll_video($workingdir."headlines.png",$workingdir."talkshow02.mp3",scalar(@links),$workingdir."scrollvideo.mp4");
	# Overlay scroll video
	overlay_video($workingdir."scrollvideo.mp4", $workingdir."headlines.mp4",$workingdir."talkshow02.mp4");
	mix_in_music($workingdir."talkshow02.mp4",$musicdir."the-breaking-news-161462.mp3","0.10");
	generate_audio("talkshow03","So all of that didn't happen?  Let's get down into details.", "artie");
	overlay_sound($imgdir."TDN-interstitial.png", $workingdir."talkshow03.mp3", $workingdir."talkshow03.mp4");
	
	# Build individual story segments
	my $counter = 4;
	foreach my $factcheck (@strings){
		unless ($counter == 4){
			generate_interstitial("talkshow".$counter++);
		}
		generate_segment("talkshow".$counter++, $factcheck, $basename_by_title{$factcheck->{title}});
	}
	
	# Build end screen 
	generate_audio("credits1","That's all folks!", "artie");
	generate_audio("credits2","Please like, share and follow!", "botty");
	system("cat ".$workingdir."credits*.mp3 > ".$workingdir."credits.mp3");
	overlay_sound($imgdir."TDN-off-air.png", $workingdir."credits.mp3", $workingdir."credits.mp4");
	overlay_video($videodir."TDN-credits.mp4", $workingdir."credits.mp4", $workingdir."talkshow99.mp4");
	
	# Put it all together
	system("cat ".$workingdir."talkshow*.mp3 > ".$outputdir."talkshow.mp3");
	merge_talkshow($date);
}

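# Faster variant of build_talkshow(): a spoken teaser cold open and a chyron
# over the opening animation replace the preshow and the headlines scroll;
# the per-segment and closing-credits logic is the same.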
sub build_fast_talkshow{
	my $date = shift;
	my $teaser = shift;
	my %strings;
	my @strings;
	my %basename_by_title;
	my $headlines;
	my @links = get_factcheck_urls($date);
	foreach my $link (@links){
		my $strings = get_strings($link);
		push @strings, $strings;
		$basename_by_title{$strings->{title}} = get_basename($link);	
	}
	foreach my $factcheck (@strings){	
		my $title = $factcheck->{title};
		$title =~ s/Fact\sCheck\://gsi;
		$title =~ s/\s*\-\-.+?$//gsi;
		$headlines .= "$title.\n";
	}
	
	print TRANSCRIPT "HEADLINES\n";
	print TRANSCRIPT "$headlines\n";
	print TRANSCRIPT "----------------------------------------------\n";

	# Clean up working directory
	system("rm ".$workingdir."talkshow*.mp3");
	system("rm ".$workingdir."talkshow*.mp4");
	system("rm ".$workingdir."intro*.mp3");
	
	build_teaser($teaser);
	
	# Build opening credits
	generate_audio("intro1","Hello and welcome to The Daily NOT! A show about what didn't happen today. My name is Artie Ficial and my jokes are mostly powered by ChatGPT.", "artie");
	generate_audio("intro2","And I'm Botty McBotface. Most of what I know comes from fact checks published by Lead Stories.", "botty");
	system("cat ".$workingdir."intro*.mp3 > ".$workingdir."talkshow01.mp3");
	overlay_sound($imgdir."TDN-on-air.png", $workingdir."talkshow01.mp3", $workingdir."intro.mp4"); # overlay_video() below needs a fresh intro.mp4 background
	overlay_chyron($videodir."TheDailyNotOpening-newlogo.mp4", $date, $headlines, $workingdir."opening.mp4",$workingdir."talkshow01.mp3");
	overlay_video($workingdir."opening.mp4", $workingdir."intro.mp4", $workingdir."talkshow01.mp4");
	mix_in_music($workingdir."talkshow01.mp4",$musicdir."newsreportmusic-6242.mp3","0.10");
	generate_audio("talkshow03","Let's get started!", "artie");
	overlay_sound($imgdir."TDN-interstitial.png", $workingdir."talkshow03.mp3", $workingdir."talkshow03.mp4");
	
	# Build individual story segments
	my $counter = 4;
	foreach my $factcheck (@strings){
		unless ($counter == 4){
			generate_interstitial("talkshow".$counter++);
		}
		generate_segment("talkshow".$counter++, $factcheck, $basename_by_title{$factcheck->{title}});
	}
	
	# Build end screen 
	generate_audio("credits1","That's all folks!", "artie");
	generate_audio("credits2","Please like, share and follow!", "botty");
	system("cat ".$workingdir."credits*.mp3 > ".$workingdir."credits.mp3");
	overlay_sound($imgdir."TDN-off-air.png", $workingdir."credits.mp3", $workingdir."credits.mp4");
	overlay_video($videodir."TDN-credits.mp4", $workingdir."credits.mp4", $workingdir."talkshow99.mp4");
	
	# Put it all together
	system("cat ".$workingdir."talkshow*.mp3 > ".$outputdir."talkshow.mp3");
	merge_talkshow($date);
}

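# Draw the human-readable date, the static chyron text (phrases/chyron.txt)
# and a right-to-left scrolling topics ticker (tmp/topics.txt, produced by
# generate_topics) onto the opening video, trimmed to the duration of the
# supplied audio file.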
sub overlay_chyron{
	my $inputvideo = shift;
	my $date = shift;
	my $headlines = shift;
	my $output = shift;
	my $soundfile_for_duration = shift;
	
	my $command = "ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 ".$soundfile_for_duration;
	my $duration = `$command`;
	chomp $duration;
	
	my $parser = DateTime::Format::Strptime->new( pattern => '%Y/%m/%d', time_zone => 'UTC');
	my $dt = $parser->parse_datetime($date);
	my $human_readable_date = $dt->strftime('%B %d, %Y');
	my $topics = generate_topics($headlines);
	
	#my $commonvf = "drawtext=fontfile=fonts/Perfect DOS VGA 437.ttf:fontcolor=white::";
	my @vfs = (
		"drawtext=fontfile=fonts/HeadlinerNo45.ttf:text='".$human_readable_date."':fontcolor=white:x=(w-text_w)/2:y=630:fontsize=120:shadowx=8:shadowy=8",
		"drawtext=fontfile=fonts/HeadlinerNo45.ttf:textfile=phrases/chyron.txt:fontcolor=white:x=100:y=1300:box=1:boxcolor=blue\\\@0.5:boxborderw=15:fontsize=80:shadowx=4:shadowy=4",
		"drawtext=textfile=tmp/topics.txt:fontcolor=white:x=100-t*500:y=1420:box=1:boxcolor=red:boxborderw=15:fontsize=80:shadowx=3:shadowy=3"
	);
	
	my $vf = join(",",@vfs);
	print("ffmpeg -y -t $duration -i $inputvideo -vf \"".$vf."\" -codec:a copy $output");
	system("ffmpeg -y -t $duration -i $inputvideo -vf \"".$vf."\" -codec:a copy $output");
	
}

sub build_teaser{
	my $teaser = shift;
	generate_audio("teaser",$teaser, "bottyfast");
	overlay_sound($imgdir."TDN-off-air.png", $workingdir."teaser.mp3", $workingdir."teaser.mp4");
	my $command = "ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 ".$workingdir."teaser.mp3";
	my $duration = `$command`;
	chomp $duration;
	overlay_video($videodir."TheDailyNotColdOpen.mp4", $workingdir."teaser.mp4", $workingdir."talkshow00.mp4",$duration);
}

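# Rebuild a single segment with a fresh (or supplied) quip, bypassing the
# cache, then re-merge the episode.  Presumably invoked by rerun.pl, as
# suggested by the "To regenerate:" line written to the transcript.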
sub regenerate_segment{
	my $link = shift;
	my $segmentnumber = shift;
	my $quip = shift;
	my $date = shift; # optional: date (YYYY/MM/DD) used by merge_talkshow to name the output file
	my $factcheck = get_strings($link);
	my $basename = get_basename($link);
	generate_segment("talkshow".$segmentnumber, $factcheck, $basename,$quip,"nocache");
	merge_talkshow($date,"nocache");
}

sub generate_interstitial {
	my $filename = shift;
	if ($filename =~ /^talkshow(\d)$/){
		$filename = "talkshow0".$1;
	}
	unless (-f $cachedir."interstitial.mp4"){
		overlay_sound($imgdir."TDN-interstitial.png", $funnysounddir."quick-swhooshing-noise-80898.mp3", $cachedir."interstitial.mp4");
	}
	system("cp ".$cachedir."interstitial.mp4 ".$workingdir.$filename.".mp4");
}

sub generate_preshow{
	my $date = shift;
	# Chatter jokes are stored
	# First sentence.|Second sentence.
	# If Botty is supposed to speak first, the line starts with "B:"
	my $line = $chatter[rand(@chatter)];
	my @lines = split(/\|/,$line);
	# If Botty starts:
	if ($lines[0] =~ /^B\:/){
		$lines[0] =~ s/^B\://gsi;
		generate_audio("preshow1",$lines[0],"bottyphone");
		generate_audio("preshow2",$lines[1],"artiephone");
	}
	# Artie starts
	else {
		generate_audio("preshow1",$lines[0],"artiephone");
		generate_audio("preshow2",$lines[1],"bottyphone");
	}
	
	generate_audio("preshow3","Attention! The Daily Not, $date","arthurphonefast");
	generate_audio("preshow4","Live in 3... 2... 1...","arthurphone");
	system("cat ".$workingdir."preshow*.mp3 > ".$workingdir."preshow.mp3");
	
	my $parser = DateTime::Format::Strptime->new( pattern => '%Y/%m/%d', time_zone => 'UTC');
	my $dt = $parser->parse_datetime($date);
	my $human_readable_date = $dt->strftime('%B %d, %Y');
	print $human_readable_date;
	
	overlay_sound($imgdir."TDN-off-air.png", $workingdir."preshow.mp3", $workingdir."preshow.mp4");
	
	open(CPROMPT, ">".$workingdir."cprompt.txt");
	print CPROMPT "C:\\\\>TheDailyNot.exe\n";
	print CPROMPT "$human_readable_date\n";
	print CPROMPT "Loading...\n";
	close CPROMPT;
	
	my $commonvf = "drawtext=fontfile=fonts/Perfect DOS VGA 437.ttf:fontcolor=white:fontsize=60:";
	my @vfs = (
		$commonvf."textfile=tmp/cprompt.txt:x=210:y=350",
		$commonvf."text='_':x=210:y=590:enable=lt(mod(t\\,0.5)\\,0.25)",
		$commonvf."text='[  ] Booting robots':x=210:y=650",
		$commonvf."text='[  ]':x=210:y=710",
		$commonvf."text='[  ] Turn on sound':x=210:y=710:enable=lt(mod(t\\,1)\\,0.5)",
		$commonvf."text='[  ] Checking facts':x=210:y=770",
		$commonvf."text='[  ] Loading jokes':x=210:y=830",
		$commonvf."text='[  ] Starting video':x=210:y=890",
		$commonvf."textfile=phrases/showtitle.txt:x=210:y=1100",
		$commonvf."text='\\ OK':fontcolor=green:x=210:y=650:enable=gt(t\\,2)",
		$commonvf."text='\\ OK':fontcolor=green:x=210:y=710:enable=gt(t\\,4)",
		$commonvf."text='\\ OK':fontcolor=green:x=210:y=770:enable=gt(t\\,6)",
		$commonvf."text='\\ OK':fontcolor=green:x=210:y=830:enable=gt(t\\,8)",
		$commonvf."text='\\ OK':fontcolor=green:x=210:y=890:enable=gt(t\\,10)"		
	);
	
	my $vf = join(",",@vfs);
	
	print("ffmpeg -y -i ".$workingdir."preshow.mp4 -vf \"".$vf."\" -codec:a copy ".$workingdir."talkshow00.mp4");
	system("ffmpeg -y -i ".$workingdir."preshow.mp4 -vf \"".$vf."\" -codec:a copy ".$workingdir."talkshow00.mp4");
	#system("ffmpeg -y -i ".$workingdir."preshow.mp4 -vf \"drawtext=fontfile=fonts/Perfect DOS VGA 437.ttf:textfile=tmp/cprompt.txt:fontcolor=white:fontsize=60:x=210:y=350,drawtext=fontfile=fonts/Perfect DOS VGA 437.ttf:text='_':fontcolor=white:fontsize=60:x=210:y=590:enable=lt(mod(t\\,0.5)\\,0.25)\" -codec:a copy ".$workingdir."talkshow00.mp4");
	#ffmpeg -y -i tmp/talkshow00.mp4 -vf "drawtext=fontfile=fonts/Perfect DOS VGA 437.ttf:textfile=cprompt.txt:fontcolor=white:fontsize=60:x=210:y=350,drawtext=fontfile=fonts/Perfect DOS VGA 437.ttf:text='_':fontcolor=white:fontsize=60:x=210:y=590:enable=lt(mod(t\,0.5)\,0.25)"  -codec:a copy tmp/text.mp4
	mix_in_music($workingdir."talkshow00.mp4",$musicdir."old-desktop-pc-booting-24280.mp3","0.50");
}




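# Concatenate all tmp/talkshow*.mp4 segments (ffmpeg concat demuxer) into
# output/thedailynot-<date>.mp4, burn in word-level captions and copy the
# transcript next to it.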
sub merge_talkshow{
	my $date = shift;
	my $nocache = shift;
	open(FRAMESFILE, ">", $workingdir."talkshowframeslist.txt");
	# Read list of files
	opendir my $dir, "$workingdir" or die "Cannot open directory: $!";
	my @files = readdir $dir;
	closedir $dir;
	# Only keep the talkshow segment videos (this also skips "." and "..")
	foreach my $file (sort @files){
		if ($file =~ /^talkshow.+\.mp4$/){
			print FRAMESFILE "file $file\n";
		}
	}
	close FRAMESFILE;
	$date =~ s/\///gsi;
	system("ffmpeg -y -safe 0 -f concat -i ".$workingdir."talkshowframeslist.txt -c copy ".$outputdir."thedailynot-".$date.".mp4");
	add_captions($outputdir."thedailynot-".$date.".mp4","thedailynot-".$date,"blue",140,$nocache);
	system("cp ".$workingdir."transcript.txt ".$outputdir."thedailynot-".$date.".txt");
}

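# Build one talk show segment: Artie asks the question, Botty answers, Artie
# reacts, Botty wraps up and Artie delivers a ChatGPT quip.  The audio is laid
# over the on-air background plus the story's -nosubs video, music is mixed in,
# and the result is cached per fact check basename.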
sub generate_segment {
	my $filename = shift;
	my $strings = shift;
	my $basename = shift;
	my $quip = shift;
	my $nocache = shift;
	
	if ($filename =~ /^talkshow(\d)$/){
		$filename = "talkshow0".$1;
	}
	
	unless ($nocache){
		if (-f $cachedir."segment-".$basename.".mp4"){
			system ("cp ".$cachedir."segment-".$basename.".mp4 ".$workingdir.$filename.".mp4");
			return;		
		}
	}
	
	$filename =~ /(\d+)$/;
	my $segmentid = $1;
	
	print TRANSCRIPT "\n\nSEGMENT $segmentid\n";
	print TRANSCRIPT $strings->{title}."\n";
	print TRANSCRIPT "To regenerate: perl rerun.pl ".$strings->{link}." $segmentid \"QUIP\"\n";
	print TRANSCRIPT "----------------------------------------------------------------------------------\n";
	
	generate_audio("tquestion",$introductions[rand(@introductions)]." ".$strings->{question}, "artie");
	generate_audio("tanswer",$strings->{secondsentence}, "botty");
	generate_audio("tbanter",$reactions[rand(@reactions)], "artiefast");
	generate_audio("tanswer2",$strings->{lastpart}?$strings->{lastpart}:"And that's all there is to it.", "botty");
	unless ($quip){
		$quip = generate_quip($strings->{title}, $strings->{fulltext});
	}
	generate_audio("tquip",$quip, "artiefast");
	system("cat ".$workingdir."tquestion.mp3 ".$workingdir."tanswer.mp3 ".$workingdir."tbanter.mp3 ".$workingdir."tanswer2.mp3 ".$workingdir."tquip.mp3 > ".$workingdir.$filename.".mp3");
	overlay_sound($imgdir."TDN-on-air.png", $workingdir.$filename.".mp3", $workingdir."background.mp4");
	# TODO: figure out a way to skip first 0.1 seconds of the nosubs video.
	overlay_video($outputdir.$basename."-nosubs.mp4", $workingdir."background.mp4",$workingdir.$filename.".mp4");
	mix_in_music($workingdir.$filename.".mp4",$musicdir."funky-loop-40283.mp3","0.10");	
	system ("cp ".$workingdir.$filename.".mp4 ".$cachedir."segment-".$basename.".mp4");
	
}

# Add a music track to a video with a certain volume.  New video will have same duration as original video.
sub mix_in_music {
	my $video = shift;
	my $music = shift;
	my $volume = shift;
	system("ffmpeg -y -i $video -stream_loop -1 -i $music -filter_complex '[0:a]volume=1[a0]; [1:a]volume=".$volume."[a1]; [a0][a1]amerge=inputs=2[a]' -map 0:v -map '[a]' -c:v copy -c:a aac -ac 2 ".$workingdir."music.mp4");
	system("mv ".$workingdir."music.mp4 $video");
}

# Take a still image and turn it into a video that has the exact same duration as sound.
sub overlay_sound {
	my $image = shift;
	my $sound = shift;
	my $output = shift;
	my $command = "ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 ".$sound;
	my $duration = `$command`;
	chomp $duration;
	system("ffmpeg -y -loop 1 -i $image -i $sound -r 25 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -ac 2 -t $duration $output");
}

sub generate_scroll_video {
	my $image = shift;
	my $sound = shift;
	my $storycount = shift;
	my $output= shift;
	my $command = "ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 ".$sound;
	my $duration = `$command`;
	chomp $duration;
	my $pps = 240/($duration/$storycount); # Scroll speed in pixels per second (240 pixels of headline image per story)
	system("ffmpeg -y -loop 1 -i $image -filter_complex 'crop=800:1422:0:0+t*$pps' -r 25 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t $duration $output");		
}

sub overlay_video {
	my $scrollvideo = shift;
	my $talkvideo = shift;
	my $output = shift;
	my $duration = shift;
	unless ($duration){
		system("ffmpeg -y -i $scrollvideo -i $talkvideo -filter_complex '[0]scale=746:-1 [pip]; [1][pip] overlay=main_w/2-overlay_w/2-7:320' -map 1:a -ac 2 $output");
	}
	else {
		system("ffmpeg -y -t $duration -i $scrollvideo -i $talkvideo -filter_complex '[0]scale=746:-1 [pip]; [1][pip] overlay=main_w/2-overlay_w/2-7:320' -map 1:a -ac 2 $output");
	}
}

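# Return all fact check article URLs listed on the hoax-alert overview page
# for a given date (format YYYY/MM/DD, matching the leadstories.com archive URLs).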
sub get_factcheck_urls {
	my $date = shift;
	my @links;
	my $url = "https://leadstories.com/hoax-alert/$date/";
	my $html = `curl -s $url`;
	$html =~ s/<span>About.+$//gsi;
	while ($html =~ /(https\:\/\/leadstories\.com\/hoax\-alert\/[^\"]+?\.html)/gsi){
		push @links, decode_entities($1);
	}
	return @links;
}
	

sub build_video {
	my $factcheckurl = shift;
	my $pipvideourl = shift;
	$basename = get_basename($factcheckurl);
	print $basename."\n";
	get_frames($factcheckurl);
	my $strings = get_strings($factcheckurl);
	generate_audio("question", $strings->{question}, "fast");
	generate_audio("secondsentence", $strings->{secondsentence});
	generate_audio("lastpart", $strings->{lastpart}." Full details and sources on lead stories dot com.");
	my $hashtags = get_hashtags($strings->{title},$strings->{fulltext});
	generate_textfile($strings->{title},$hashtags,$factcheckurl);
	build_frames($pipvideourl);
	system ("mv ".$workingdir."final.mp4 ".$outputdir.$basename.".mp4");
}

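# Newer single-video builder: like build_video() but language-aware, with an
# extra closing-message frame, transitions, a "claim" thumbnail flash and a
# separate -nosubs output that is reused in the talk show segments.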
sub build_video_v2 {
	my $factcheckurl = shift;
	my $pipvideourl = shift;
	$basename = get_basename($factcheckurl);
	my $language = get_language($factcheckurl);
	print $basename."--".$language."\n";
	get_frames_v2($factcheckurl);
	my $strings = get_strings($factcheckurl);
	generate_audio("question", $strings->{question}, "fast".$language);
	generate_audio("secondsentence", $strings->{secondsentence},$language);
	generate_audio("lastpart", $strings->{lastpart},$language);
	generate_audio("details", $strings->{closingmessage},$language);
	my $hashtags = get_hashtags($strings->{title},$strings->{fulltext});
	generate_textfile($strings->{title},$hashtags,$factcheckurl);
	build_frames_v2($pipvideourl, $strings->{fulltext},$strings->{title},$strings->{evidenceimage});
	system ("mv ".$workingdir."final.mp4 ".$outputdir.$basename.".mp4");
	system ("mv ".$workingdir."nosubs.mp4 ".$outputdir.$basename."-nosubs.mp4");
}

sub generate_textfile {
	open(FH, ">".$outputdir.$basename.".txt");
	print FH join("\n",@_);
	close FH;
}

sub get_basename {
	my $factcheckurl = shift;
	$factcheckurl =~ /\/([^\/]+?)\.html/;
	return $1;
}

sub get_language {
	my $factcheckurl = shift;
	if ($factcheckurl =~ /\/\/(.+?)\.leadstories/){
		return $1;
	}
	else {
		return "";
	}
}

sub pick_funnysound {
	# Returns one random filename from the /funnysounds folder
	# Read list of files
	opendir my $dir, "$funnysounddir" or die "Cannot open directory: $!";
	# Skip ".", ".." and any other dotfiles
	my @files = grep { !/^\./ } sort(readdir $dir);
	closedir $dir;
	# Return a random value
	return $files[rand(@files)];
}

sub build_frames {
	my $pipvideourl = shift;
	my $funnysoundfile = pick_funnysound();
	my $command = "ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 ".$workingdir."question.mp3";
	my $questionduration = `$command`;
	$command = "ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 $funnysounddir"."$funnysoundfile";
	my $funnysoundduration = `$command`;
	$command = "ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 ".$workingdir."secondsentence.mp3";
	my $secondsentenceduration =  `$command`;
	$command = "ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 ".$workingdir."lastpart.mp3";
	my $lastpartduration =  `$command`;

	chomp $questionduration;
	chomp $funnysoundduration;
	chomp $secondsentenceduration;
	chomp $lastpartduration;
	
	system("ffmpeg -y -loop 1 -i ".$workingdir."frame0.png -i ".$workingdir."question.mp3 -r 25 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t $questionduration ".$workingdir."question.mp4");
	system("ffmpeg -y -loop 1 -i ".$workingdir."frame1.png -i ".$funnysounddir.$funnysoundfile." -r 25 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t ".$funnysoundduration." ".$workingdir."funnysound.mp4");
	system("ffmpeg -y -loop 1 -i ".$workingdir."frame2.png -i ".$workingdir."secondsentence.mp3 -r 25 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t $secondsentenceduration ".$workingdir."secondsentence.mp4");
	system("ffmpeg -y -loop 1 -i ".$workingdir."frame3.png -i ".$workingdir."lastpart.mp3 -r 25 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t $lastpartduration ".$workingdir."lastpart.mp4");
	
	if ($pipvideourl){
		embed_pip($pipvideourl, $questionduration);
	}
	
	open(FRAMESFILE, ">", $workingdir."frameslist.txt");
	print FRAMESFILE "file ./question.mp4\n";
	print FRAMESFILE "file ./funnysound.mp4\n";
	print FRAMESFILE "file ./secondsentence.mp4\n";
	print FRAMESFILE "file ./lastpart.mp4\n";
	close FRAMESFILE;
	
	system("ffmpeg -y -safe 0 -f concat -i ".$workingdir."frameslist.txt -c copy ".$workingdir."output.mp4");
	
	system("cp ".$musicdir."justbecause.mp4 ".$workingdir."justbecause.mp4");
	
	open(FINALFILE, ">", $workingdir."finallist.txt");
	print FINALFILE "file ./output_with_music.mp4\n";
	print FINALFILE "file ./justbecause.mp4\n";
	close FINALFILE;
	
	# Music volume lowered from 0.10 to 0.07
	system("ffmpeg -y -i ".$workingdir."output.mp4 -i ".$musicdir."the-breaking-news-161462.mp3 -filter_complex '[0:a]volume=1[a0]; [1:a]volume=0.07[a1]; [a0][a1]amerge=inputs=2[a]' -map 0:v -map '[a]' -c:v copy -c:a aac -shortest -ac 2 ".$workingdir."output_with_music.mp4");
	system("ffmpeg -y -safe 0 -f concat -i ".$workingdir."finallist.txt -c copy ".$workingdir."final.mp4");
}

# TODO:
# * Add evidence video url custom field in MT
# * Add existence of evidence image or video to strings.json
# * Add values of evidence image and evidence video to call to build_frames_v2
# * Add conditionals (see below)

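# Assemble the v2 fact check video from the frame screenshots: a 0.1 second
# "subliminal" claim card (so it becomes the thumbnail), the question with a
# zooming face overlay, the caption frame with a random funny sound, xfade
# transitions between frames, floating star/heart/thumb reactions, optional
# PIP and evidence handling, then captions and a music bed.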
sub build_frames_v2 {
	my $pipvideourl = shift;
	my $fulltext = shift;
	my $title = shift;
	my $evidenceimage = shift;
	my $funnysoundfile = pick_funnysound();
	my $command = "ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 ".$workingdir."question.mp3";
	my $questionduration = `$command`;
	$command = "ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 $funnysounddir"."$funnysoundfile";
	my $funnysoundduration = `$command`;
	$command = "ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 ".$workingdir."secondsentence.mp3";
	my $secondsentenceduration =  `$command`;
	$command = "ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 ".$workingdir."lastpart.mp3";
	my $lastpartduration =  `$command`;
	$command = "ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 ".$workingdir."details.mp3";
	my $detailsduration =  `$command`;	

	chomp $questionduration;
	chomp $funnysoundduration;
	chomp $secondsentenceduration;
	chomp $lastpartduration;
	chomp $detailsduration;
	
	# Generate ultra-short clips of the frames needed for the transitions
	system("ffmpeg -y -loop 1 -i ".$workingdir."frame1.png -r 25 -f lavfi -i anullsrc=channel_layout=stereo:sample_rate=48000 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t 0.25 ".$workingdir."frame1.mp4");
	system("ffmpeg -y -loop 1 -i ".$workingdir."frame2.png -r 25 -f lavfi -i anullsrc=channel_layout=stereo:sample_rate=48000 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t 0.25 ".$workingdir."frame2.mp4");
	system("ffmpeg -y -loop 1 -i ".$workingdir."frame3.png -r 25 -f lavfi -i anullsrc=channel_layout=stereo:sample_rate=48000 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t 0.25 ".$workingdir."frame3.mp4");
	system("ffmpeg -y -loop 1 -i ".$workingdir."frame4.png -r 25 -f lavfi -i anullsrc=channel_layout=stereo:sample_rate=48000 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t 0.25 ".$workingdir."frame4.mp4");

	# TODO:
	# Improve thumbnails by showing the claim in words (will help with clicks in search hopefully)
	# Get claim from headline (like in artie botty show)
	
	my $claim = $title;
	$claim =~ s/Fact\sCheck\:\s*//gsi;
	$claim =~ s/\s*\-\-.+?$//gsi;

	# Generate drawtext filter to put the claim in the middle of the video (taking care not to overlap with audio captions), white letters on red background, write to file (like for audio captions)
	# Use this drawtext while generating the subliminal.mp4
	# Chop the claim in 20 character max chunks so the chunks each fit on a line
	my @chunks;
	$claim =~ s/\%/\\\%/gsi;
	while ($claim =~ /\G(.{1,20})(?:\s+|$)/g) {
		push @chunks, $1;
	}
	open(CLAIM, ">:encoding(utf-8)", "".$workingdir."claim.txt");
	print CLAIM join("\n",@chunks);
	close CLAIM;
	
	my @filters;
	
	push @filters, "drawtext=fontfile=fonts/Montserrat-VariableFont_wght.ttf\\\\:style=Semibold:textfile=".$workingdir."claim.txt:x=50:y=700:fontsize=80:fontcolor=black:box=1:boxcolor=white:boxborderw=10";
	push @filters, "drawtext=fontfile=fonts/HeadlinerNo45.ttf:text='Fact Check':x=50:y=600:fontsize=80:fontcolor=white:box=1:boxcolor=#AF1B14:boxborderw=10";
	
	
	open(FILTERS, ">".$workingdir."claimfilters.txt");
	print FILTERS join(",\n", @filters);
	close (FILTERS);

	# Flash the frame where the caption appears for a tenth of a second so it becomes the video thumbnail
	system("ffmpeg -y -loop 1 -i ".$workingdir."frame1.png -f lavfi -i anullsrc=channel_layout=stereo:sample_rate=48000 -filter_complex_script '".$workingdir."claimfilters.txt' -r 25 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t 0.1 ".$workingdir."subliminal.mp4");
	
	# Ask the question
	system("ffmpeg -y -loop 1 -i ".$workingdir."frame0.png -i ".$workingdir."question.mp3 -r 25 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t $questionduration ".$workingdir."nofacequestion.mp4");
	
	# Add the zoomed out face video to it
	build_facequestion($workingdir."nofacequestion.mp4",$questionduration);
		
	# Record scratch & show the caption...
	system("ffmpeg -y -loop 1 -i ".$workingdir."frame1.png -i ".$funnysounddir.$funnysoundfile." -r 25 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t ".$funnysoundduration." ".$workingdir."funnysound.mp4");

	# Transition
	system(" ffmpeg -y -i ".$workingdir."frame1.mp4 -i ".$workingdir."frame2.mp4 -filter_complex xfade=transition=slideleft:duration=0.2:offset=0  -r 25 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t 0.25 ".$workingdir."transition1-2.mp4");
	
	# No, that's not true: bla di bla...
	system("ffmpeg -y -loop 1 -i ".$workingdir."frame2.png -i ".$workingdir."secondsentence.mp3 -r 25 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t $secondsentenceduration ".$workingdir."secondsentence.mp4");
	
	# Transition
	system(" ffmpeg -y -i ".$workingdir."frame2.mp4 -i ".$workingdir."frame3.mp4 -filter_complex xfade=transition=slidedown:duration=0.2:offset=0  -r 25 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t 0.25 ".$workingdir."transition2-3.mp4");
	
	# More explanation...
	system("ffmpeg -y -loop 1 -i ".$workingdir."frame3.png -i ".$workingdir."lastpart.mp3 -r 25 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t $lastpartduration ".$workingdir."lastpart.mp4");

    # Transition
	system(" ffmpeg -y -i ".$workingdir."frame3.mp4 -i ".$workingdir."frame4.mp4 -filter_complex xfade=transition=slideup:duration=0.2:offset=0  -r 25 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t 0.25 ".$workingdir."transition3-4.mp4");
	
	# For details and sources, see leadstories.com, like and share!
	system("ffmpeg -y -loop 1 -i ".$workingdir."frame4.png -i ".$workingdir."details.mp3 -r 25 -c:v libx264 -c:a aac -b:a 192k -ar 48000 -pix_fmt yuv420p -t $detailsduration ".$workingdir."details.mp4");
	
	
	# TODO: if there is a pip video and an evidence image/video, split this into two parts
	# and apply the pip video only during the first half (up to transition 1-2)
	# Apply the evidence video to the second half if there is one
	# Then merge it all together.
	
	if($evidenceimage){
		open(FRAMESFILE, ">", $workingdir."frameslist.txt");
		print FRAMESFILE "file ./question.mp4\n";
		print FRAMESFILE "file ./funnysound.mp4\n";
		print FRAMESFILE "file ./transition1-2.mp4\n";
		close FRAMESFILE;
	}
	else {
		open(FRAMESFILE, ">", $workingdir."frameslist.txt");
		print FRAMESFILE "file ./question.mp4\n";
		print FRAMESFILE "file ./funnysound.mp4\n";
		print FRAMESFILE "file ./transition1-2.mp4\n";
		print FRAMESFILE "file ./secondsentence.mp4\n";
		print FRAMESFILE "file ./transition2-3.mp4\n";
		print FRAMESFILE "file ./lastpart.mp4\n";
		print FRAMESFILE "file ./transition3-4.mp4\n";
		print FRAMESFILE "file ./details.mp4\n";
		close FRAMESFILE;
	}
	
	system("ffmpeg -y -safe 0 -f concat -i ".$workingdir."frameslist.txt -c copy ".$workingdir."output.mp4");
	
	if ($pipvideourl){
		$command = "ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 ".$workingdir."output.mp4";
		my $outputduration =  `$command`;
		chomp $outputduration;
		embed_pip_v2($pipvideourl, $outputduration);
	}
		
	# Add heart/star/thumb floating animation starting at $questionduration time
	
	$command = "ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 ".$workingdir."output.mp4";
	my $outputduration =  `$command`;
	chomp $outputduration;
	my $start = $questionduration;
	my $end = $start + 2;
	
	system("ffmpeg -y -i ".$workingdir."output.mp4 -loop 1 -i images/star.png -filter_complex '[1:0]format=yuva420p,scale=150:150,fade=in:st=".$start.":d=0.5:alpha=1,fade=out:st=".$end.":d=1:alpha=1 [ovr]; [0:0][ovr] overlay=700:1600+(".$start."-t)*200' -t $outputduration  -y ".$workingdir."star.mp4");
	system("ffmpeg -y -i ".$workingdir."star.mp4 -loop 1 -i images/heart.png -filter_complex '[1:0]format=yuva420p,scale=150:150,fade=in:st=".$start.":d=0.5:alpha=1,fade=out:st=".$end.":d=1:alpha=1 [ovr]; [0:0][ovr] overlay=250:1400+(".$start."-t)*140' -t $outputduration  -y ".$workingdir."heart.mp4");
	system("ffmpeg -y -i ".$workingdir."heart.mp4 -loop 1 -i images/thumbsup.png -filter_complex '[1:0]format=yuva420p,scale=150:150,fade=in:st=".$start.":d=0.5:alpha=1,fade=out:st=".$end.":d=1:alpha=1 [ovr]; [0:0][ovr] overlay=450:1700+(".$start."-t)*230' -t $outputduration  -y ".$workingdir."thumbsup.mp4");

	if($evidenceimage){
		open(FRAMESFILE, ">", $workingdir."frameslist.txt");
		print FRAMESFILE "file ./thumbsup.mp4\n";
		print FRAMESFILE "file ./secondsentence.mp4\n";
		print FRAMESFILE "file ./transition2-3.mp4\n";
		print FRAMESFILE "file ./lastpart.mp4\n";
		print FRAMESFILE "file ./transition3-4.mp4\n";
		print FRAMESFILE "file ./details.mp4\n";
		close FRAMESFILE;
		system("ffmpeg -y -safe 0 -f concat -i ".$workingdir."frameslist.txt -c copy ".$workingdir."thumbsup_with_evidence.mp4");
		system("mv ".$workingdir."thumbsup_with_evidence.mp4 ".$workingdir."thumbsup.mp4");
	}

	system("cp ".$workingdir."thumbsup.mp4 ".$workingdir."nosubs.mp4");

	open(FRAMESFILE, ">", $workingdir."frameslist2.txt");
	print FRAMESFILE "file ./subliminal.mp4\n";
	print FRAMESFILE "file ./nosubs.mp4\n";
	close FRAMESFILE;
	
	system("ffmpeg -y -safe 0 -f concat -i ".$workingdir."frameslist2.txt -c copy ".$workingdir."thumbsup.mp4");

	add_captions($workingdir."thumbsup.mp4", $fulltext);
	
	# Add music soundtrack at volume 0.07
	system("ffmpeg -y -i ".$workingdir."thumbsup.mp4 -i ".$musicdir."the-breaking-news-161462.mp3 -filter_complex '[0:a]volume=1[a0]; [1:a]volume=0.07[a1]; [a0][a1]amerge=inputs=2[a]' -map 0:v -map '[a]' -c:v copy -c:a aac -shortest -ac 2 ".$workingdir."final.mp4");
}

sub build_facequestion{
	my $questionvideo = shift;
	my $questionduration = shift;
	my $facepicture = pick_facepicture();
	# Build a video zooming out from the face
	print("ffmpeg -y -loop 1 -i $facesdir"."$facepicture -vf zoompan=z='5-on*0.05':x='iw/2-iw/zoom/2':y='ih/2-ih/zoom/2':d=1:s=1280x720:fps=30 -t ".$questionduration." ".$workingdir."face.mp4");
	system("ffmpeg -y -loop 1 -i $facesdir"."$facepicture -vf zoompan=z='5-on*0.05':x='iw/2-iw/zoom/2':y='ih/2-ih/zoom/2':d=1:s=1280x720:fps=30 -t ".$questionduration." ".$workingdir."face.mp4");
	# Overlay the video
	#TODO add blinking text here
	print ("ffmpeg -y -i ".$workingdir."face.mp4 -i ".$questionvideo." -filter_complex '[0]scale=-1:480 [pip]; [1][pip] overlay=main_w/2-overlay_w/2:170, drawtext=fontfile=fonts/Perfect DOS VGA 437.ttf:fontcolor=red:fontsize=60:text=Fact check incoming...:x=175:y=560:shadowx=4:shadowy=4:enable=lt(mod(t\\,1)\\,0.5)' -map 1:a -t ".$questionduration." ".$workingdir."question.mp4");

	system("ffmpeg -y -i ".$workingdir."face.mp4 -i ".$questionvideo." -filter_complex '[0]scale=-1:480 [pip]; [1][pip] overlay=main_w/2-overlay_w/2:170, drawtext=fontfile=fonts/Perfect DOS VGA 437.ttf:fontcolor=red:fontsize=60:text=Fact check incoming...:x=175:y=560:shadowx=4:shadowy=4:enable=lt(mod(t\\,1)\\,0.5)' -map 1:a -t ".$questionduration." ".$workingdir."question.mp4");
}

sub pick_facepicture{
	# Returns one random filename from the /faces folder
	# Read list of files
	opendir my $dir, "$facesdir" or die "Cannot open directory: $!";
	# Skip ".", ".." and any other dotfiles
	my @files = grep { !/^\./ } sort(readdir $dir);
	closedir $dir;
	# Return a random value
	return $files[rand(@files)];
}


sub embed_pip {
	my $pipvideourl = shift;
	my $questionduration = shift;
	# clear away old downloads
	system("rm ".$workingdir."pip.mp4");

	# attempt to download
	system("yt-dlp -o ".$workingdir."pip.mp4 '".$pipvideourl."'"); 
	if (-e $workingdir."pip.mp4.webm"){
		system("mv ".$workingdir."pip.mp4.webm ".$workingdir."pip.mp4");
	}
	
	if (-e $workingdir."pip.mp4"){
		# if it worked, merge with question TODO
		system("ffmpeg -y -i ".$workingdir."pip.mp4 -i ".$workingdir."question.mp4 -filter_complex '[0]scale=-1:600 [pip]; [1][pip] overlay=main_w/2-overlay_w/2:180' -map 1:a -t ".$questionduration." ".$workingdir."pipquestion.mp4");
		#mv question.mp4 old, mv pip in place
		system("mv ".$workingdir."question.mp4 ".$workingdir."oldquestion.mp4");
		system("mv ".$workingdir."pipquestion.mp4 ".$workingdir."question.mp4");
	}
}

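# Download the social media video with yt-dlp, blur the middle band of the
# assembled output and loop the downloaded clip on top of that band for the
# full duration of the video.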
sub embed_pip_v2 {
	my $pipvideourl = shift;
	my $outputduration = shift;
	# clear away old downloads
	system("rm ".$workingdir."pip.mp4");
	system("rm ".$workingdir."pip.mp4.webm");
	
	# attempt to download
	system("yt-dlp -o ".$workingdir."pip.mp4 '".$pipvideourl."'"); 
	if (-e $workingdir."pip.mp4.webm"){
		system("mv ".$workingdir."pip.mp4.webm ".$workingdir."pip.mp4");
	}
	
	if (-e $workingdir."pip.mp4"){
		# if it worked, merge with blurred output
		system("ffmpeg -y -i ".$workingdir."output.mp4 -filter_complex '[0:v]crop=1080:640:0:640,boxblur=10[fg]; [0:v][fg]overlay=0:640[v]' -map '[v]' -map 0:a -t $outputduration ".$workingdir."blurred.mp4");
		system("ffmpeg -y -stream_loop -1 -i ".$workingdir."pip.mp4 -i ".$workingdir."blurred.mp4 -filter_complex '[0]scale=-1:640 [pip]; [1][pip] overlay=main_w/2-overlay_w/2:640' -map 1:a -t ".$outputduration." ".$workingdir."pipoutput.mp4");
		system("mv ".$workingdir."output.mp4 ".$workingdir."oldoutput.mp4");
		system("mv ".$workingdir."pipoutput.mp4 ".$workingdir."output.mp4");
	}
}

sub get_screenshot_headlines{
	# Create a long, tall (800 wide, 4000 high) screenshot of the day's headlines.
	# Later convert this to a scrolling down animation with scroll speed determined by the number of fact checks & animation length
	# determined by the audio file of Botty reading the headlines.
	my $date = shift;
	my $url = "https://leadstories.com/hoax-alert/$date/";
	system("google-chrome --headless --silent --screenshot=".$workingdir."headlines.png --window-size=800,4000 --no-sandbox --hide-scrollbars '$url'");	
}

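# Take full-size screenshots of the frame0..frame3 renderings of the article.
# (Assumption: leadstories.com serves special /frame<N>/ URL variants of each
# fact check that render the individual frames used in the video.)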
sub get_frames {
	my $factcheckurl = shift;
	my $frame0url = $factcheckurl;
	my $frame1url = $factcheckurl;
	my $frame2url = $factcheckurl;
	my $frame3url = $factcheckurl;
	$frame0url =~ s/(leadstories\.com\/)/$1frame0\//;
	$frame1url =~ s/(leadstories\.com\/)/$1frame1\//;
	$frame2url =~ s/(leadstories\.com\/)/$1frame2\//;
	$frame3url =~ s/(leadstories\.com\/)/$1frame3\//;
	system("google-chrome --headless --silent --screenshot=".$workingdir."frame0.png --window-size=1080,1920 --no-sandbox --hide-scrollbars '$frame0url'");
	system("google-chrome --headless --silent --screenshot=".$workingdir."frame1.png --window-size=1080,1920 --no-sandbox --hide-scrollbars '$frame1url'");
	system("google-chrome --headless --silent --screenshot=".$workingdir."frame2.png --window-size=1080,1920 --no-sandbox --hide-scrollbars '$frame2url'");
	system("google-chrome --headless --silent --screenshot=".$workingdir."frame3.png --window-size=1080,1920 --no-sandbox --hide-scrollbars '$frame3url'");
}

sub get_frames_v2 {
	my $factcheckurl = shift;
	my $frame0url = $factcheckurl;
	my $frame1url = $factcheckurl;
	my $frame2url = $factcheckurl;
	my $frame3url = $factcheckurl;
	my $frame4url = $factcheckurl;
	$frame0url =~ s/(leadstories\.com\/)/$1frame0_v2\//;
	$frame1url =~ s/(leadstories\.com\/)/$1frame1_v2\//;
	$frame2url =~ s/(leadstories\.com\/)/$1frame2_v2\//;
	$frame3url =~ s/(leadstories\.com\/)/$1frame3_v2\//;
	$frame4url =~ s/(leadstories\.com\/)/$1frame4_v2\//;
	system("google-chrome --headless --silent --screenshot=".$workingdir."frame0.png --window-size=1080,1920 --no-sandbox --hide-scrollbars '$frame0url'");
	system("google-chrome --headless --silent --screenshot=".$workingdir."frame1.png --window-size=1080,1920 --no-sandbox --hide-scrollbars '$frame1url'");
	system("google-chrome --headless --silent --screenshot=".$workingdir."frame2.png --window-size=1080,1920 --no-sandbox --hide-scrollbars '$frame2url'");
	system("google-chrome --headless --silent --screenshot=".$workingdir."frame3.png --window-size=1080,1920 --no-sandbox --hide-scrollbars '$frame3url'");
	system("google-chrome --headless --silent --screenshot=".$workingdir."frame4.png --window-size=1080,1920 --no-sandbox --hide-scrollbars '$frame4url'");
}

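# Fetch the /strings/ JSON variant of a fact check and return it as a hashref.
# Keys used elsewhere in this file include title, question, secondsentence,
# lastpart, closingmessage, fulltext and evidenceimage; the original article
# URL is added under "link".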
sub get_strings {
	my $factcheckurl = shift;
	my $stringsurl = $factcheckurl;
	$stringsurl =~ s/(leadstories\.com\/)/$1strings\//;
	print 'wget -O - "'.$stringsurl.'"'."\n";
	my $stringsjson = `wget -O - "$stringsurl"`;
	my $decoded_json = decode_json($stringsjson);
	$decoded_json->{link} = $factcheckurl;
	return $decoded_json;
}

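# Synthesize $text to $workingdir<filename>.mp3 with Google Text-to-Speech.
# $mode selects the voice and speed (artie*, botty*, arthurphone*, fast*,
# language subdomains, ...).  Results are cached in $cachedir, keyed on
# md5(text) plus the mode.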
sub generate_audio {
	my $filename = shift;
	my $text = shift;
	my $mode = shift;
	print "$mode: $text\n";
	print TRANSCRIPT "$mode: $text\n";
	my $cachefile = $cachedir.md5_hex(Encode::encode_utf8($text)).$mode.".mp3";
	if (-f $cachefile){
		print "From cache!\n";
		system("cp $cachefile $workingdir".$filename.".mp3");
		return;
	}
	
	$text =~ s/\(archived\shere\)//gsi;
	$text =~ s/Lead\sStories/\"Lead Stories\"/gs;
	
	my %jsondata;
	
	$jsondata{"voice"}{"ssmlGender"} = "Female";
	$jsondata{"voice"}{"name"} = "en-US-Neural2-G";
	$jsondata{"voice"}{"languageCode"} = "en-US";
	
	if ($mode =~ /^fast/){
		$jsondata{"audioConfig"}{"speakingRate"} = "1.1";
		$jsondata{"audioConfig"}{"pitch"} = "-5";
	}
	
	if ($mode =~ /nederlands/){
		$jsondata{"voice"}{"ssmlGender"} = "Female";
		$jsondata{"voice"}{"name"} = "nl-NL-Wavenet-A";
		$jsondata{"voice"}{"languageCode"} = "nl-NL";
	}
	
	if ($mode =~ /espanol/){
		$jsondata{"voice"}{"ssmlGender"} = "Female";
		$jsondata{"voice"}{"name"} = "es-US-Neural2-A";
		$jsondata{"voice"}{"languageCode"} = "es-US";
	}		

	if ($mode =~ /xn--80aa2aboqjl0g5e/){
		$jsondata{"voice"}{"ssmlGender"} = "Female";
		$jsondata{"voice"}{"name"} = "uk-UA-Wavenet-A";
		$jsondata{"voice"}{"languageCode"} = "uk-UA";
	}
	
	if ($mode =~ /xn--romn-doa3r/){
		$jsondata{"voice"}{"ssmlGender"} = "Female";
		$jsondata{"voice"}{"name"} = "ro-RO-Wavenet-A";
		$jsondata{"voice"}{"languageCode"} = "ro-RO";
	}
	
	
	if ($mode eq "artiephone"){
		$jsondata{"audioConfig"}{"speakingRate"} = "1";
		$jsondata{"voice"}{"ssmlGender"} = "Male";
		$jsondata{"voice"}{"name"} = "en-US-Neural2-I";
		$jsondata{"audioConfig"}{"effectsProfileId"} = "telephony-class-application";
		#$jsondata{"audioConfig"}{"sampleRateHertz"} = 24400;
	}	
	
	if ($mode eq "artiefast"){
		$jsondata{"audioConfig"}{"speakingRate"} = "1.1";
		$jsondata{"voice"}{"ssmlGender"} = "Male";
		$jsondata{"voice"}{"name"} = "en-US-Neural2-I";
	}
	
	if ($mode eq "artie"){
		$jsondata{"audioConfig"}{"speakingRate"} = "1";
		$jsondata{"voice"}{"ssmlGender"} = "Male";
		$jsondata{"voice"}{"name"} = "en-US-Neural2-I";
	}
	
	if ($mode eq "bottyfast"){
		$jsondata{"audioConfig"}{"speakingRate"} = "1.1";
		#$jsondata{"audioConfig"}{"pitch"} = "+5";
		$jsondata{"voice"}{"ssmlGender"} = "Female";
		$jsondata{"voice"}{"name"} = "en-US-Neural2-H";
	}
	
	if ($mode eq "bottyphone"){
		$jsondata{"audioConfig"}{"speakingRate"} = "1";
		$jsondata{"voice"}{"ssmlGender"} = "Female";
		$jsondata{"voice"}{"name"} = "en-US-Neural2-H";
		$jsondata{"audioConfig"}{"effectsProfileId"} = "telephony-class-application";
		#$jsondata{"audioConfig"}{"sampleRateHertz"} = 24400;
	}
	
	if ($mode eq "botty"){
		$jsondata{"audioConfig"}{"speakingRate"} = "1";
		#$jsondata{"audioConfig"}{"pitch"} = "+5";
		$jsondata{"voice"}{"ssmlGender"} = "Female";
		$jsondata{"voice"}{"name"} = "en-US-Neural2-H";
	}
	
	if ($mode eq "arthurphone"){
		$jsondata{"audioConfig"}{"speakingRate"} = "0.9";
		$jsondata{"audioConfig"}{"effectsProfileId"} = "telephony-class-application";
		$jsondata{"voice"}{"name"} = "en-GB-Neural2-B";
		$jsondata{"voice"}{"languageCode"} = "en-GB";
		$jsondata{"voice"}{"ssmlGender"} = "Male";
	}
	
	if ($mode eq "arthurphonefast"){
		$jsondata{"audioConfig"}{"speakingRate"} = "1.15";
		$jsondata{"audioConfig"}{"effectsProfileId"} = "telephony-class-application";
		$jsondata{"voice"}{"name"} = "en-GB-Neural2-B";
		$jsondata{"voice"}{"languageCode"} = "en-GB";
		$jsondata{"voice"}{"ssmlGender"} = "Male";
	}
	
	$jsondata{"audioConfig"}{"audioEncoding"} = "MP3";
	$jsondata{"input"}{"text"} = $text;
	my $json = encode_json (\%jsondata);

	open(JSONFILE, ">", "/tmp/jsonfile.txt");
	print JSONFILE $json;
	close JSONFILE;

	my $audiocontent;
	my $failed;

	while (! $audiocontent){
		if ($failed){
			print "Google API failed $failed times so far\n";
			exit if $failed >= 3; # give up after three failed attempts rather than retrying forever
			sleep 5;
		}
		# Send to Google TTS API
		`curl -s -H "Content-Type: application/json; charset=utf-8" -d @/tmp/jsonfile.txt "https://texttospeech.googleapis.com/v1beta1/text:synthesize?key=AIzaSyBLwdIl5e7NgLEs4SXwjpriK5NiVgwVKMc" >/tmp/voicejson.txt`;

		# Parse json
		open(my $fh, '<:encoding(UTF-8)', "/tmp/voicejson.txt") or die "Could not open file /tmp/voicejson.txt";
		my $returnjson;
		while (<$fh>) {
			$returnjson .= $_;
		}
		close $fh;
		my $decoded_json;
		eval { $decoded_json = decode_json($returnjson); };

		$audiocontent = $decoded_json->{"audioContent"} if $decoded_json;
		if (! $audiocontent){
			print $decoded_json->{"error"}->{"message"}."\n" if $decoded_json && $decoded_json->{"error"};
			$failed++;
		}
	}

	# Save base64 audio
	open(BASE64FILE, ">", "/tmp/audiobase64.txt");
	print BASE64FILE $audiocontent;
	close BASE64FILE;

	# Convert, cache & save
	system("base64 /tmp/audiobase64.txt --decode > $workingdir".$filename.".mp3");
	system("cp ".$workingdir.$filename.".mp3 $cachefile");
}

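# Ask ChatGPT (gpt-3.5-turbo) for four hashtags based on the fact check title
# and full text; always prepends #CheckTok and adds #FactCheck when the model
# did not include it.  Returns an empty string if the API response cannot be
# parsed.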
sub get_hashtags {
	my $title = shift;
	my $fulltext = shift;
	my %gptdata;
	my %messages;
	my $decoded_data;

	$gptdata{model}="gpt-3.5-turbo";
	$messages{role} = "user";
	$gptdata{temperature} = 0;
	
	my $prompt = "You are the social media manager of a fact checking website.  Based on the summary of the fact check that follows, output four hashtags mentioning names, entities, places etc. found in the text.\n$title\n$fulltext";
	
	$messages{content} = $prompt;
	$gptdata{messages} = [\%messages];

	my $json = to_json(\%gptdata);
	open(FH, ">".$workingdir."prompt.txt");
	print FH $json;
	close FH;
	my $command = "curl -m 10 https://api.openai.com/v1/chat/completions   -H 'Content-Type: application/json'   -H 'Authorization: Bearer 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' -d \@".$workingdir."prompt.txt";

	my $gptjson = `$command`;
	open(FHGPT, ">".$workingdir."gptjson.txt");
	print FHGPT $gptjson;
	close FHGPT;
	open(FHGPT, "<".$workingdir."gptjson.txt");
	$gptjson = "";
	while(<FHGPT>){
		$gptjson .= $_;
	}
	close FHGPT;

	eval { $decoded_data = decode_json($gptjson);};
	unless ($decoded_data){
		print "Problem with JSON from gpt\n";
	return "";
	}
	my $extrahashtags = "#CheckTok ";
	unless ($decoded_data->{choices}->[0]->{message}->{content} =~ /factcheck/gsi){
		$extrahashtags .= "#FactCheck ";
	}
	return $extrahashtags.$decoded_data->{choices}->[0]->{message}->{content};
}

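# Ask ChatGPT for Artie's one- or two-line reaction to a fact check, with
# guardrails in the prompt (media literacy advice for sensitive topics, no
# insults).  Quips are cached per fact check full text; on API failure a
# stock "I'm speechless" line is returned.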
sub generate_quip {
	my $title = shift;
	my $fulltext = shift;
	
	my $cachefile = $cachedir.md5_hex(Encode::encode_utf8($fulltext))."-quip-hot.txt";
	if (-f $cachefile){
		open(CACHEQUIP, "<".$cachefile);
		my $cachedquip;
		while(<CACHEQUIP>){
			$cachedquip .= $_;
		}
		close CACHEQUIP;	
		return $cachedquip;
	}
	
	my %gptdata;
	my %messages;
	my $decoded_data;

	$gptdata{model}="gpt-3.5-turbo";
	$messages{role} = "user";
	$gptdata{temperature} = 0.11;
	
	#my $prompt = "You are a radio talk show host.  Read the following factcheck and come back with a funny quip about it.\n$title\n$fulltext";
	#my $prompt = "You are a radio talk show host. Your co-host has just read a fact check and you need to respond to it with a one or two line quip on air, without hashtags or emojis. If the fact check is about a politician, politics, war, tragedy, racism, death or violence, respond with applicable media literacy advice, for example that people should google things first or that they should verify the date and location before sharing something. If the fact check is about illness, disease, health advice or medical subjects, respond by urging listeners to go to a doctor or medical professional for advice, rather than believe just any advice on the Internet. If the fact check is about something else, try to make the quip funny. Under no circumstance should the quip be racist, politically charged, insulting, insensitive or rude. This is the fact check:\n$title\n$fulltext";
	my $prompt = "You are a radio talk show host. Your co-host has just read a fact check and you need to respond to it with a one or two line quip on air, without hashtags or emojis. If the fact check is about a politician, politics, war, tragedy, racism, death or violence, respond with applicable media literacy advice, for example that people should google things first or that they should verify the date and location before sharing something. If the fact check is about illness, disease, health advice or medical subjects, respond by urging listeners to go to a doctor or medical professional for advice, rather than believe just any advice on the Internet. If the fact check is about something else, make the quip funny. Under no circumstance should the quip be racist, politically charged, insulting, insensitive or rude.  Do not use the phrase 'fact check a fact check' or something similar.  Do not use the phrase 'too good to be true'.  This is the fact check:\n$title\n$fulltext";

	$messages{content} = $prompt;
	$gptdata{messages} = [\%messages];

	my $json = to_json(\%gptdata);
	open(FH, ">".$workingdir."prompt.txt");
	print FH $json;
	close FH;
	my $command = "curl -m 15 -s https://api.openai.com/v1/chat/completions   -H 'Content-Type: application/json'   -H 'Authorization: Bearer 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' -d \@".$workingdir."prompt.txt";
	my $gptjson = `$command`;
	
	# Retry up to three times
	if ($gptjson =~ /cf_bad_gateway/gsi){
		sleep 2;
		$gptjson = `$command`;
	}
	if ($gptjson =~ /cf_bad_gateway/gsi){
		sleep 4;
		$gptjson = `$command`;
	}	
	if ($gptjson =~ /cf_bad_gateway/gsi){
		sleep 6;
		$gptjson = `$command`;
	}
	
	open(FHGPT, ">".$workingdir."gptjson.txt");
	print FHGPT $gptjson;
	close FHGPT;
	open(FHGPT, "<".$workingdir."gptjson.txt");
	$gptjson = "";
	while(<FHGPT>){
		$gptjson .= $_;
	}
	close FHGPT;

	eval { $decoded_data = decode_json($gptjson);};
	unless ($decoded_data){
		print "Problem with JSON from gpt\n";
		return "I'm speechless.  Even ChatGPT doesn't know what to say here.";
	}
	if ($decoded_data->{choices}->[0]->{message}->{content}){
		open(CACHEQUIP, ">".$cachefile);
		print CACHEQUIP $decoded_data->{choices}->[0]->{message}->{content};
		close CACHEQUIP;
		return $decoded_data->{choices}->[0]->{message}->{content};
	}
	return "I'm speechless.  Even ChatGPT doesn't know what to say here.";
}

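# Ask ChatGPT for a comma separated list of entities and topics in the day's
# headlines and write it to tmp/topics.txt for the scrolling ticker in
# overlay_chyron().  (The cache read is currently disabled via "if (0)".)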
sub generate_topics {

	my $fulltext = shift;
	
	my $cachefile = $cachedir.md5_hex(Encode::encode_utf8($fulltext))."-topics.txt";
	#if (-f $cachefile){
	if (0){
		open(CACHE, "<".$cachefile);
		my $cachedtopics;
		while(<CACHE>){
			$cachedtopics .= $_;
		}
		close CACHE;	
		return $cachedtopics;
	}
	
	my %gptdata;
	my %messages;
	my $decoded_data;

	$gptdata{model}="gpt-3.5-turbo";
	$messages{role} = "user";
	$gptdata{temperature} = 0.1;
	
	my $prompt = "Produce a single line, comma separated list of the most important entities, people and topics mentioned in following headlines:\n$fulltext";
	
	$messages{content} = $prompt;
	$gptdata{messages} = [\%messages];

	my $json = to_json(\%gptdata);
	open(FH, ">".$workingdir."topicsprompt.txt");
	print FH $json;
	close FH;
	my $command = "curl -s https://api.openai.com/v1/chat/completions   -H 'Content-Type: application/json'   -H 'Authorization: Bearer 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' -d \@".$workingdir."topicsprompt.txt";
	my $gptjson = `$command`;
	
	# Retry up to three times
	if ($gptjson =~ /cf_bad_gateway/gsi){
		sleep 2;
		$gptjson = `$command`;
	}
	if ($gptjson =~ /cf_bad_gateway/gsi){
		sleep 4;
		$gptjson = `$command`;
	}	
	if ($gptjson =~ /cf_bad_gateway/gsi){
		sleep 6;
		$gptjson = `$command`;
	}
	
	open(FHGPT, ">".$workingdir."gptjson.txt");
	print FHGPT $gptjson;
	close FHGPT;
	open(FHGPT, "<".$workingdir."gptjson.txt");
	$gptjson = "";
	while(<FHGPT>){
		$gptjson .= $_;
	}
	close FHGPT;

	eval { $decoded_data = decode_json($gptjson);};
	unless ($decoded_data){
		print "Problem with JSON from gpt\n";
		return "I'm speechless.  Even ChatGPT doesn't know what to say here.";
	}
	if ($decoded_data->{choices}->[0]->{message}->{content}){
		open(CACHE, ">".$cachefile);
		print CACHE $decoded_data->{choices}->[0]->{message}->{content};
		close CACHE;
	
		open(FHGPT, ">".$workingdir."topics.txt");
		print FHGPT $decoded_data->{choices}->[0]->{message}->{content};
		close(FHGPT);
		return $decoded_data->{choices}->[0]->{message}->{content};
	}
	return "I'm speechless.  Even ChatGPT doesn't know what to say here.";
}

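# Burn word-by-word captions into a video: get word timestamps from Whisper
# via get_transcript(), fix a few recurring mis-transcriptions, make sure
# words do not overlap and stay on screen for at least a tenth of a second,
# then apply one drawtext filter per word through an ffmpeg filter script.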
sub add_captions {
	my $file = shift;
	my $text = shift;
	my $textcolor = shift;
	my $textsize =  shift;
	my $nocache = shift;
	
	# Generate drawtext commands
	my $json = get_transcript($file,$text,$nocache);
	my $decoded_data;
	eval { $decoded_data = decode_json($json);};
	
	my @filters;
	
	my $shadowsize = 10;
	my $fontsize = 150;
	my $fontcolor = "#ae1b16";
	my $bordercolor = "white";
	my $previousend;
	my $counter = 0;
	
	if ($textcolor){$fontcolor=$textcolor};
	if ($textsize){$fontsize=$textsize};
	
	my @words = split(/\s+/,$decoded_data->{text});
	my $skipped = 0;
	foreach my $word (@{$decoded_data->{words}}){
		
		$word->{word} =~ s/Knot/NOT/gsi;
		$word->{word} =~ s/Bodhi/Botty/gsi;
		$word->{word} =~ s/Artificial/Artie Ficial/gsi;
		
		unless ($word->{start}){$word->{start} = 0.05} # Don't start a word in the first frame, it messes up the video thumbnail
		if ($word->{start} < $previousend){$word->{start} = $previousend} # Make sure words don't overlap
		if (($word->{end} - $word->{start}) < 0.1){$word->{end} = $word->{end} + 0.1; print "Boosting $word->{word}\n";} # Make sure words are shown at least a tenth of a second.
		$previousend = $word->{end};
		
		my $punctuationless = uc($words[$counter]);
		$punctuationless =~ s/[\.,\!\?]//gsi;
		my $uppercaseword = uc($word->{word});
		
		
		# If uppercaseword & punctuationless are the same, use the word from the text (potentially with punctuation)
		if ($uppercaseword eq $punctuationless){
			$word->{word} = uc($words[$counter++]);
			$skipped = 0;
		}
		# Else use the transcribed individual word without punctuation and skip ahead in the sentence by one word, once, so we hopefully can pick up
		# the thread from there.
		else {
			$word->{word} = uc($word->{word});
			unless ($skipped){$counter = $counter + 1};
			$skipped = 1;
		}
		

		if ($word->{word} =~ /\'/){
			open(WORD, ">:encoding(utf-8)", "".$workingdir.$counter."-word.txt");
			print WORD $word->{word};
			close WORD;
			push @filters, "drawtext=fontfile=fonts/impact.ttf:textfile=".$workingdir.$counter."-word.txt:x=(w-text_w)/2:y=(h-85)/2:fontsize=".$fontsize.":borderw=".$shadowsize.":fontcolor=".$fontcolor.":bordercolor=".$bordercolor.":enable='between(t,$word->{start},$word->{end})'";
		}
		else {
			push @filters, "drawtext=fontfile=fonts/impact.ttf:text='$word->{word}':x=(w-text_w)/2:y=(h-85)/2:fontsize=".$fontsize.":borderw=".$shadowsize.":fontcolor=".$fontcolor.":bordercolor=".$bordercolor.":enable='between(t,$word->{start},$word->{end})'";
		}
		
	}
	
	open(FILTERS, ">:encoding(utf-8)", "".$workingdir."filters.txt");
	print FILTERS join(",\n", @filters);
	close (FILTERS);

	my $command = "ffmpeg -y -i $file -filter_complex_script '".$workingdir."filters.txt' ".$workingdir."subs.mp4";
	print $command;
	system($command);
	system("mv $file old-".$file);
	system("mv ".$workingdir."subs.mp4 ".$file);
	# Example:
	# ffmpeg -y -loop 1 -i tmp/frame0.png -i tmp/question.mp3 -t 4 -vf "drawtext=text='Does':x=300:y=600:fontcolor=white:fontsize=100:enable='between(t,0,0.35)',drawtext=text='a':x=300:y=600:fontcolor=white:fontsize=100:enable='between(t,0.4,0.75)',drawtext=text='video':x=300:y=600:fontcolor=white:fontsize=100:enable='between(t,0.8,1.15)',drawtext=text='prove':x=300:y=600:fontcolor=white:fontsize=100:enable='between(t,1.2,1.6)'" output/subs.mp4
	# ffmpeg -y -loop 1 -i tmp/frame0.png -i tmp/question.mp3 -t 4 -filter_complex_script 'filters.txt' output/subs.mp4
	# 		"drawtext=fontfile=fonts/HeadlinerNo45.ttf:text='".$human_readable_date."':fontcolor=white:x=310:y=630:x=(w-text_w)/2:y=630:fontsize=120:shadowx=8:shadowy=8",
}

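# Transcribe a video with the OpenAI Whisper API (verbose_json with word-level
# timestamps) and return the raw JSON, cached per caption text unless $nocache
# is set.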
sub get_transcript {
	my $file = shift;
	my $text = shift;
	my $nocache = shift;
	# Cache based on MD5 hash (careful with encoding)
	my $cachefile = $cachedir.md5_hex(Encode::encode_utf8($text))."-transcription.txt";
	unless ($nocache){
	if (-f $cachefile){
		open(CACHE, "<".$cachefile);
		my $cache;
		while(<CACHE>){
			$cache .= $_;
		}
		close CACHE;	
		return $cache;
	}
	}
	
	# Send to Whisper
	my $command = "curl -s https://api.openai.com/v1/audio/transcriptions -H 'Content-Type: multipart/form-data' -H 'Authorization: Bearer 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' -F file='\@".$file."' -F model='whisper-1' -F response_format='verbose_json' -F timestamp_granularities[]='word' ";
	print $command."\n";
	my $json = `$command`;
	
	my $decoded_data;
	eval { $decoded_data = decode_json($json);};
	
	# Return JSON
	
	if ($decoded_data->{text}){
		open(CACHE, ">".$cachefile);
		print CACHE $json;
		close CACHE;
		return $json;
	}
}



return 1;
