OpenShot Library | libopenshot  0.3.0
Clip.cpp
/**
 * @file
 * @brief Source file for Clip class
 * @author Jonathan Thomas <jonathan@openshot.org>
 *
 * @ref License
 */

// Copyright (c) 2008-2019 OpenShot Studios, LLC
//
// SPDX-License-Identifier: LGPL-3.0-or-later
#include "Clip.h"

#include "AudioResampler.h"
#include "Exceptions.h"
#include "FFmpegReader.h"
#include "FrameMapper.h"
#include "QtImageReader.h"
#include "ChunkReader.h"
#include "DummyReader.h"
#include "Timeline.h"
#include "ZmqLogger.h"

#ifdef USE_IMAGEMAGICK
    #include "MagickUtilities.h"
    #include "ImageReader.h"
    #include "TextReader.h"
#endif

#include <Qt>

using namespace openshot;
// Init default settings for a clip
void Clip::init_settings()
{
    // Init clip settings
    Position(0.0);
    Layer(0);
    Start(0.0);
    ClipBase::End(0.0);
    gravity = GRAVITY_CENTER;
    scale = SCALE_FIT;
    anchor = ANCHOR_CANVAS;
    display = FRAME_DISPLAY_NONE;
    mixing = VOLUME_MIX_NONE;
    waveform = false;
    previous_properties = "";
    parentObjectId = "";

    // Init scale curves
    scale_x = Keyframe(1.0);
    scale_y = Keyframe(1.0);

    // Init location curves
    location_x = Keyframe(0.0);
    location_y = Keyframe(0.0);

    // Init alpha
    alpha = Keyframe(1.0);

    // Init time & volume
    time = Keyframe(1.0);
    volume = Keyframe(1.0);

    // Init audio waveform color
    wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

    // Init shear and perspective curves
    shear_x = Keyframe(0.0);
    shear_y = Keyframe(0.0);
    origin_x = Keyframe(0.5);
    origin_y = Keyframe(0.5);
    perspective_c1_x = Keyframe(-1.0);
    perspective_c1_y = Keyframe(-1.0);
    perspective_c2_x = Keyframe(-1.0);
    perspective_c2_y = Keyframe(-1.0);
    perspective_c3_x = Keyframe(-1.0);
    perspective_c3_y = Keyframe(-1.0);
    perspective_c4_x = Keyframe(-1.0);
    perspective_c4_y = Keyframe(-1.0);

    // Init audio channel filter and mappings
    channel_filter = Keyframe(-1.0);
    channel_mapping = Keyframe(-1.0);

    // Init audio and video overrides
    has_audio = Keyframe(-1.0);
    has_video = Keyframe(-1.0);

    // Initialize the attached object and attached clip as null pointers
    parentTrackedObject = nullptr;
    parentClipObject = NULL;

    // Init reader info struct
    init_reader_settings();
}

// Init reader info details
void Clip::init_reader_settings() {
    if (reader) {
        // Init rotation (if any)
        init_reader_rotation();

        // Initialize info struct
        info = reader->info;
    }
}

// Init reader's rotation (if any)
void Clip::init_reader_rotation() {
    // Don't init rotation if clip has keyframes
    if (rotation.GetCount() > 0)
        return;

    // Init rotation
    if (reader && reader->info.metadata.count("rotate") > 0) {
        // Use reader metadata rotation (if any)
        // This is typical with cell phone videos filmed in different orientations
        try {
            float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), 0);
            rotation = Keyframe(rotate_metadata);
        } catch (const std::exception& e) {}
    }
    else
        // Default no rotation
        rotation = Keyframe(0.0);
}

// Default Constructor for a clip
Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();
}

// Constructor with reader
Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();

    // Open and Close the reader (to set the duration of the clip)
    Open();
    Close();

    // Update duration and set parent
    if (reader) {
        ClipBase::End(reader->info.duration);
        reader->ParentClip(this);
        // Init reader info struct
        init_reader_settings();
    }
}

// Constructor with filepath
Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();

    // Get file extension (and convert to lower case)
    std::string ext = get_file_extension(path);
    std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

    // Determine if common video format
    if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
        ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob")
    {
        try
        {
            // Open common video format
            reader = new openshot::FFmpegReader(path);

        } catch(...) { }
    }
    if (ext=="osp")
    {
        try
        {
            // Open OpenShot project file (.osp) as a Timeline reader
            reader = new openshot::Timeline(path, true);

        } catch(...) { }
    }

    // If no video found, try each reader
    if (!reader)
    {
        try
        {
            // Try an image reader
            reader = new openshot::QtImageReader(path);

        } catch(...) {
            try
            {
                // Try a video reader
                reader = new openshot::FFmpegReader(path);

            } catch(...) { }
        }
    }

    // Update duration and set parent
    if (reader) {
        ClipBase::End(reader->info.duration);
        reader->ParentClip(this);
        allocated_reader = reader;
        // Init reader info struct
        init_reader_settings();
    }
}
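
// Usage sketch (illustrative only; "video.mp4" is a hypothetical local file).
// The constructor above picks a reader from the file extension, so a typical
// caller only needs to open the clip before requesting frames:
//
//     openshot::Clip clip("video.mp4");     // FFmpegReader chosen by extension
//     clip.Open();                          // required before GetFrame()
//     std::shared_ptr<openshot::Frame> f = clip.GetFrame(1);
//     clip.Close();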

// Destructor
Clip::~Clip()
{
    // Delete the reader if clip created it
    if (allocated_reader) {
        delete allocated_reader;
        allocated_reader = NULL;
        reader = NULL;
    }

    // Close the resampler
    if (resampler) {
        delete resampler;
        resampler = NULL;
    }
}

// Attach clip to bounding box
void Clip::AttachToObject(std::string object_id)
{
    // Search for the tracked object on the timeline
    Timeline* parentTimeline = (Timeline *) ParentTimeline();

    if (parentTimeline) {
        // Create a smart pointer to the tracked object from the timeline
        std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
        Clip* clipObject = parentTimeline->GetClip(object_id);

        // Check for valid tracked object
        if (trackedObject){
            SetAttachedObject(trackedObject);
        }
        else if (clipObject) {
            SetAttachedClip(clipObject);
        }
    }
    return;
}
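
// Usage sketch (hypothetical ids): the clip must already belong to a Timeline,
// since the lookup above goes through ParentTimeline(). The same call handles
// both attachment kinds:
//
//     clip->AttachToObject("tracker-box-id");   // follow a tracked bounding box
//     clip->AttachToObject("other-clip-id");    // or follow another clip's transform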

// Set the pointer to the trackedObject this clip is attached to
void Clip::SetAttachedObject(std::shared_ptr<openshot::TrackedObjectBase> trackedObject){
    parentTrackedObject = trackedObject;
    return;
}

// Set the pointer to the clip this clip is attached to
void Clip::SetAttachedClip(Clip* clipObject){
    parentClipObject = clipObject;
    return;
}

// Set the current reader
void Clip::Reader(ReaderBase* new_reader)
{
    // Delete previously allocated reader (if not related to new reader)
    // FrameMappers that point to the same allocated reader are ignored
    bool is_same_reader = false;
    if (new_reader && allocated_reader) {
        if (new_reader->Name() == "FrameMapper") {
            // Determine if FrameMapper is pointing at the same allocated reader
            FrameMapper* clip_mapped_reader = (FrameMapper*) new_reader;
            if (allocated_reader == clip_mapped_reader->Reader()) {
                is_same_reader = true;
            }
        }
    }
    // Clear existing allocated reader (if different)
    if (allocated_reader && !is_same_reader) {
        reader->Close();
        allocated_reader->Close();
        delete allocated_reader;
        reader = NULL;
        allocated_reader = NULL;
    }

    // set reader pointer
    reader = new_reader;

    // set parent
    if (reader) {
        reader->ParentClip(this);

        // Init reader info struct
        init_reader_settings();
    }
}

// Get the current reader
ReaderBase* Clip::Reader()
{
    if (reader)
        return reader;
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}
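
// Ownership sketch: only readers the clip allocated itself (allocated_reader)
// are deleted in ~Clip(); a reader passed in from outside stays caller-owned.
//
//     openshot::FFmpegReader r("video.mp4");   // hypothetical path, caller-owned
//     openshot::Clip c(&r);                    // clip uses r but will not delete it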

// Open the internal reader
void Clip::Open()
{
    if (reader)
    {
        // Open the reader
        reader->Open();
        is_open = true;

        // Copy Reader info to Clip
        info = reader->info;

        // Set some clip properties from the file reader
        if (end == 0.0)
            ClipBase::End(reader->info.duration);
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Close the internal reader
void Clip::Close()
{
    is_open = false;
    if (reader) {
        ZmqLogger::Instance()->AppendDebugMethod("Clip::Close");

        // Close the reader
        reader->Close();
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Get end position of clip (trim end of video), which can be affected by the time curve.
float Clip::End() const
{
    // if a time curve is present, use its length
    if (time.GetCount() > 1)
    {
        // Determine the FPS of this clip
        float fps = 24.0;
        if (reader)
            // file reader
            fps = reader->info.fps.ToFloat();
        else
            // Throw error if reader not initialized
            throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

        return float(time.GetLength()) / fps;
    }
    else
        // just use the duration (as detected by the reader)
        return end;
}

// Override End() position
void Clip::End(float value) {
    ClipBase::End(value);
}
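
// Worked example (assumed values): with a 24 fps reader and a time curve whose
// last keyframe sits at output frame 48, time.GetLength() reports roughly 48
// frames, so End() above returns about 48 / 24.0 = 2.0 seconds, regardless of
// the duration the reader itself reports.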

// Create an openshot::Frame object for a specific frame number of this reader.
std::shared_ptr<Frame> Clip::GetFrame(int64_t frame_number)
{
    // Check for open reader (or throw exception)
    if (!is_open)
        throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

    if (reader)
    {
        // Adjust out of bounds frame number
        frame_number = adjust_frame_number_minimum(frame_number);

        // Get the original frame and pass it to GetFrame overload
        std::shared_ptr<Frame> original_frame = GetOrCreateFrame(frame_number);
        return GetFrame(original_frame, frame_number, NULL);
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Create an openshot::Frame object for a specific frame number of this reader.
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t frame_number)
{
    // Check for open reader (or throw exception)
    if (!is_open)
        throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

    if (reader)
    {
        // Adjust out of bounds frame number
        frame_number = adjust_frame_number_minimum(frame_number);

        // Get the original frame and pass it to GetFrame overload
        std::shared_ptr<Frame> original_frame = GetOrCreateFrame(frame_number);
        return GetFrame(original_frame, frame_number, NULL);
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Use an existing openshot::Frame object and draw this Clip's frame onto it
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t frame_number, openshot::TimelineInfoStruct* options)
{
    // Check for open reader (or throw exception)
    if (!is_open)
        throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

    if (reader)
    {
        // Adjust out of bounds frame number
        frame_number = adjust_frame_number_minimum(frame_number);

        // Is a time map detected?
        int64_t new_frame_number = frame_number;
        int64_t time_mapped_number = adjust_frame_number_minimum(time.GetLong(frame_number));
        if (time.GetLength() > 1)
            new_frame_number = time_mapped_number;

        // Now that we have re-mapped what frame number is needed, go and get the frame pointer
        std::shared_ptr<Frame> original_frame = GetOrCreateFrame(new_frame_number);

        // Get time mapped frame number (used to increase speed, change direction, etc...)
        // TODO: Handle variable # of samples, since this resamples audio for different speeds (only when time curve is set)
        get_time_mapped_frame(original_frame, new_frame_number);
        // Restore the requested frame number, so the correct keyframes are applied.
        original_frame->number = frame_number;

        // Apply local effects to the frame (if any)
        apply_effects(original_frame);

        // Apply global timeline effects (i.e. transitions & masks... if any)
        if (timeline != NULL && options != NULL) {
            if (options->is_top_clip) {
                // Apply global timeline effects (only to top clip... if overlapping, pass in timeline frame number)
                Timeline* timeline_instance = (Timeline*) timeline;
                original_frame = timeline_instance->apply_effects(original_frame, background_frame->number, Layer());
            }
        }

        // Apply keyframe / transforms
        apply_keyframes(original_frame, background_frame->GetImage());

        // Return processed 'frame'
        return original_frame;
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}
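
// Call-flow sketch (details live in Timeline.cpp): when rendering through a
// Timeline, this overload is invoked for each clip that overlaps the requested
// frame, with the partially composited timeline frame passed as
// background_frame. options->is_top_clip is set only for the top-most
// overlapping clip, which is why the global transitions/masks above are
// applied exactly once per timeline frame.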

// Look up an effect by ID
openshot::EffectBase* Clip::GetEffect(const std::string& id)
{
    // Find the matching effect (if any)
    for (const auto& effect : effects) {
        if (effect->Id() == id) {
            return effect;
        }
    }
    return nullptr;
}

// Get file extension
std::string Clip::get_file_extension(std::string path)
{
    // return last part of path
    return path.substr(path.find_last_of(".") + 1);
}
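
// Example: get_file_extension("videos/movie.final.MP4") returns "MP4"; the
// filepath constructor above lower-cases the result before matching it
// against the list of known formats.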

// Reverse an audio buffer
void Clip::reverse_buffer(juce::AudioBuffer<float>* buffer)
{
    int number_of_samples = buffer->getNumSamples();
    int channels = buffer->getNumChannels();

    // Reverse array (create new buffer to hold the reversed version)
    auto *reversed = new juce::AudioBuffer<float>(channels, number_of_samples);
    reversed->clear();

    for (int channel = 0; channel < channels; channel++)
    {
        int n=0;
        for (int s = number_of_samples - 1; s >= 0; s--, n++)
            reversed->getWritePointer(channel)[n] = buffer->getWritePointer(channel)[s];
    }

    // Copy the samples back to the original array
    buffer->clear();
    // Loop through channels, and get audio samples
    for (int channel = 0; channel < channels; channel++)
        // Get the audio samples for this channel
        buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);

    delete reversed;
    reversed = nullptr;
}
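
// Example: per channel, samples [s0, s1, ..., sN-1] become [sN-1, ..., s1, s0].
// get_time_mapped_frame() below calls this whenever the time curve is
// decreasing, i.e. the clip is playing backwards.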

// Adjust the audio and image of a time mapped frame
void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
    // Check for valid reader
    if (!reader)
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

    // Check for a valid time map curve
    if (time.GetLength() > 1)
    {
        const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

        // create buffer and resampler
        juce::AudioBuffer<float> *samples = nullptr;
        if (!resampler)
            resampler = new AudioResampler();

        // Get new frame number
        int new_frame_number = frame->number;

        // Get delta (difference in previous Y value)
        int delta = int(round(time.GetDelta(frame_number)));

        // Init audio vars
        int channels = reader->info.channels;
        int number_of_samples = GetOrCreateFrame(new_frame_number)->GetAudioSamplesCount();

        // Only resample audio if needed
        if (reader->info.has_audio) {
            // Determine if we are speeding up or slowing down
            if (time.GetRepeatFraction(frame_number).den > 1) {
                // SLOWING DOWN AUDIO
                // Resample data, and return new buffer pointer
                juce::AudioBuffer<float> *resampled_buffer = nullptr;

                // SLOW DOWN audio (split audio)
                samples = new juce::AudioBuffer<float>(channels, number_of_samples);
                samples->clear();

                // Loop through channels, and get audio samples
                for (int channel = 0; channel < channels; channel++)
                    // Get the audio samples for this channel
                    samples->addFrom(channel, 0, GetOrCreateFrame(new_frame_number)->GetAudioSamples(channel),
                                     number_of_samples, 1.0f);

                // Reverse the samples (if needed)
                if (!time.IsIncreasing(frame_number))
                    reverse_buffer(samples);

                // Resample audio to be X times slower (where X is the denominator of the repeat fraction)
                resampler->SetBuffer(samples, 1.0 / time.GetRepeatFraction(frame_number).den);

                // Resample the data (since it's the 1st slice)
                resampled_buffer = resampler->GetResampledBuffer();

                // Just take the samples we need for the requested frame
                int start = (number_of_samples * (time.GetRepeatFraction(frame_number).num - 1));
                if (start > 0)
                    start -= 1;
                for (int channel = 0; channel < channels; channel++)
                    // Add new (slower) samples to the frame object
                    frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, start),
                                    number_of_samples, 1.0f);

                // Clean up
                resampled_buffer = nullptr;

            }
            else if (abs(delta) > 1 && abs(delta) < 100) {
                int start = 0;
                if (delta > 0) {
                    // SPEED UP (multiple frames of audio), as long as it's not more than X frames
                    int total_delta_samples = 0;
                    for (int delta_frame = new_frame_number - (delta - 1);
                         delta_frame <= new_frame_number; delta_frame++)
                        total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
                                                                         reader->info.sample_rate,
                                                                         reader->info.channels);

                    // Allocate a new sample buffer for these delta frames
                    samples = new juce::AudioBuffer<float>(channels, total_delta_samples);
                    samples->clear();

                    // Loop through each frame in this delta
                    for (int delta_frame = new_frame_number - (delta - 1);
                         delta_frame <= new_frame_number; delta_frame++) {
                        // buffer to hold delta samples
                        int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
                        auto *delta_samples = new juce::AudioBuffer<float>(channels,
                                                                           number_of_delta_samples);
                        delta_samples->clear();

                        for (int channel = 0; channel < channels; channel++)
                            delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
                                                   number_of_delta_samples, 1.0f);

                        // Reverse the samples (if needed)
                        if (!time.IsIncreasing(frame_number))
                            reverse_buffer(delta_samples);

                        // Copy the delta samples into the combined buffer
                        for (int channel = 0; channel < channels; channel++)
                            // Get the audio samples for this channel
                            samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
                                             number_of_delta_samples, 1.0f);

                        // Clean up
                        delete delta_samples;
                        delta_samples = nullptr;

                        // Increment start position
                        start += number_of_delta_samples;
                    }
                }
                else {
                    // SPEED UP in reverse (multiple frames of audio), as long as it's not more than X frames
                    int total_delta_samples = 0;
                    for (int delta_frame = new_frame_number - (delta + 1);
                         delta_frame >= new_frame_number; delta_frame--)
                        total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
                                                                         reader->info.sample_rate,
                                                                         reader->info.channels);

                    // Allocate a new sample buffer for these delta frames
                    samples = new juce::AudioBuffer<float>(channels, total_delta_samples);
                    samples->clear();

                    // Loop through each frame in this delta
                    for (int delta_frame = new_frame_number - (delta + 1);
                         delta_frame >= new_frame_number; delta_frame--) {
                        // buffer to hold delta samples
                        int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
                        auto *delta_samples = new juce::AudioBuffer<float>(channels,
                                                                           number_of_delta_samples);
                        delta_samples->clear();

                        for (int channel = 0; channel < channels; channel++)
                            delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
                                                   number_of_delta_samples, 1.0f);

                        // Reverse the samples (if needed)
                        if (!time.IsIncreasing(frame_number))
                            reverse_buffer(delta_samples);

                        // Copy the delta samples into the combined buffer
                        for (int channel = 0; channel < channels; channel++)
                            // Get the audio samples for this channel
                            samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
                                             number_of_delta_samples, 1.0f);

                        // Clean up
                        delete delta_samples;
                        delta_samples = nullptr;

                        // Increment start position
                        start += number_of_delta_samples;
                    }
                }

                // Resample audio to be X times faster (where X is the delta of the repeat fraction)
                resampler->SetBuffer(samples, float(start) / float(number_of_samples));

                // Resample data, and return new buffer pointer
                juce::AudioBuffer<float> *buffer = resampler->GetResampledBuffer();

                // Add the newly resized audio samples to the current frame
                for (int channel = 0; channel < channels; channel++)
                    // Add new (faster) samples to the frame object
                    frame->AddAudio(true, channel, 0, buffer->getReadPointer(channel), number_of_samples, 1.0f);

                // Clean up
                buffer = nullptr;
            }
            else {
                // Use the samples on this frame (but maybe reverse them if needed)
                samples = new juce::AudioBuffer<float>(channels, number_of_samples);
                samples->clear();

                // Loop through channels, and get audio samples
                for (int channel = 0; channel < channels; channel++)
                    // Get the audio samples for this channel
                    samples->addFrom(channel, 0, frame->GetAudioSamples(channel), number_of_samples, 1.0f);

                // reverse the samples
                if (!time.IsIncreasing(frame_number))
                    reverse_buffer(samples);

                // Add reversed samples to the frame object
                for (int channel = 0; channel < channels; channel++)
                    frame->AddAudio(true, channel, 0, samples->getReadPointer(channel), number_of_samples, 1.0f);
            }

            delete samples;
            samples = nullptr;
        }
    }
}
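
// Example time curve (assumed keyframe values): points (1,1) and (100,50) map
// 100 output frames onto the first 50 source frames, i.e. 2x slow motion. Each
// source frame then repeats twice, GetRepeatFraction().den becomes 2, and the
// SLOWING DOWN branch above stretches each frame's audio across the repeats.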

// Adjust frame number minimum value
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
{
    // Never return a frame number 0 or below
    if (frame_number < 1)
        return 1;
    else
        return frame_number;
}

// Get or generate a blank frame
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number)
{
    try {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod(
            "Clip::GetOrCreateFrame (from reader)",
            "number", number);

        // Attempt to get a frame (but this could fail if a reader has just been closed)
        auto reader_frame = reader->GetFrame(number);

        // Return real frame
        if (reader_frame) {
            // Create a new copy of reader frame
            // This allows a clip to modify the pixels and audio of this frame without
            // changing the underlying reader's frame data
            auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
            if (has_video.GetInt(number) == 0) {
                // No video, so add transparent pixels
                reader_copy->AddColor(QColor(Qt::transparent));
            }
            if (has_audio.GetInt(number) == 0 || number > reader->info.video_length) {
                // No audio, so include silence (also, mute audio if past end of reader)
                reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());
            }
            return reader_copy;
        }

    } catch (const ReaderClosed & e) {
        // ...
    } catch (const OutOfBoundsFrame & e) {
        // ...
    }

    // Estimate # of samples needed for this frame
    int estimated_samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::GetOrCreateFrame (create blank)",
        "number", number,
        "estimated_samples_in_frame", estimated_samples_in_frame);

    // Create blank frame
    auto new_frame = std::make_shared<Frame>(
        number, reader->info.width, reader->info.height,
        "#000000", estimated_samples_in_frame, reader->info.channels);
    new_frame->SampleRate(reader->info.sample_rate);
    new_frame->ChannelsLayout(reader->info.channel_layout);
    new_frame->AddAudioSilence(estimated_samples_in_frame);
    return new_frame;
}

// Generate JSON string of this object
std::string Clip::Json() const {

    // Return formatted string
    return JsonValue().toStyledString();
}

// Get all properties for a specific frame
std::string Clip::PropertiesJSON(int64_t requested_frame) const {

    // Generate JSON properties list
    Json::Value root;
    root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
    root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
    root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
    root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
    root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
    root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
    root["mixing"] = add_property_json("Volume Mixing", mixing, "int", "", NULL, 0, 2, false, requested_frame);
    root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
    if (!parentObjectId.empty()) {
        root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, NULL, -1, -1, false, requested_frame);
    } else {
        root["parentObjectId"] = add_property_json("Parent", 0.0, "string", "", NULL, -1, -1, false, requested_frame);
    }
    // Add gravity choices (dropdown style)
    root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));

    // Add scale choices (dropdown style)
    root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
    root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
    root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
    root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));

    // Add frame number display choices (dropdown style)
    root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
    root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
    root["display"]["choices"].append(add_property_choice_json("Timeline", FRAME_DISPLAY_TIMELINE, display));
    root["display"]["choices"].append(add_property_choice_json("Both", FRAME_DISPLAY_BOTH, display));

    // Add volume mixing choices (dropdown style)
    root["mixing"]["choices"].append(add_property_choice_json("None", VOLUME_MIX_NONE, mixing));
    root["mixing"]["choices"].append(add_property_choice_json("Average", VOLUME_MIX_AVERAGE, mixing));
    root["mixing"]["choices"].append(add_property_choice_json("Reduce", VOLUME_MIX_REDUCE, mixing));

    // Add waveform choices (dropdown style)
    root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
    root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));

    // Add the parentTrackedObject's properties
    if (parentTrackedObject)
    {
        // Convert Clip's frame position to Timeline's frame position
        long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
        long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
        double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

        // Get attached object's parent clip properties
        std::map< std::string, float > trackedObjectParentClipProperties = parentTrackedObject->GetParentClipProperties(timeline_frame_number);
        double parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];
        // Get attached object properties
        std::map< std::string, float > trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);

        // Correct the parent Tracked Object properties by the clip's reference system
        float parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["cx"];
        float parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["cy"];
        float parentObject_scale_x = trackedObjectProperties["w"]*trackedObjectProperties["sx"];
        float parentObject_scale_y = trackedObjectProperties["h"]*trackedObjectProperties["sy"];
        float parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["r"];

        // Add the parent Tracked Object properties to JSON
        root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }
    // Add the parentClipObject's properties
    else if (parentClipObject)
    {
        // Convert Clip's frame position to Timeline's frame position
        long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
        long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
        double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

        // Correct the parent Clip Object properties by the clip's reference system
        float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
        float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
        float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
        float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
        float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
        float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
        float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);

        // Add the parent Clip Object properties to JSON
        root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }
    else
    {
        // Add this clip's own properties to JSON
        root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }

    // Keyframes
    root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", &alpha, 0.0, 1.0, false, requested_frame);
    root["origin_x"] = add_property_json("Origin X", origin_x.GetValue(requested_frame), "float", "", &origin_x, 0.0, 1.0, false, requested_frame);
    root["origin_y"] = add_property_json("Origin Y", origin_y.GetValue(requested_frame), "float", "", &origin_y, 0.0, 1.0, false, requested_frame);
    root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", &volume, 0.0, 1.0, false, requested_frame);
    root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", &time, 0.0, 30 * 60 * 60 * 48, false, requested_frame);
    root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", &channel_filter, -1, 10, false, requested_frame);
    root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", &channel_mapping, -1, 10, false, requested_frame);
    root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", &has_audio, -1, 1.0, false, requested_frame);
    root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", &has_video, -1, 1.0, false, requested_frame);

    // Add enable audio/video choices (dropdown style)
    root["has_audio"]["choices"].append(add_property_choice_json("Auto", -1, has_audio.GetValue(requested_frame)));
    root["has_audio"]["choices"].append(add_property_choice_json("Off", 0, has_audio.GetValue(requested_frame)));
    root["has_audio"]["choices"].append(add_property_choice_json("On", 1, has_audio.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("Auto", -1, has_video.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("Off", 0, has_video.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("On", 1, has_video.GetValue(requested_frame)));

    root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame);
    root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame);
    root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", &wave_color.blue, 0, 255, false, requested_frame);
    root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", &wave_color.green, 0, 255, false, requested_frame);

    // Return formatted string
    return root.toStyledString();
}

// Generate Json::Value for this object
Json::Value Clip::JsonValue() const {

    // Create root json object
    Json::Value root = ClipBase::JsonValue(); // get parent properties
    root["parentObjectId"] = parentObjectId;
    root["gravity"] = gravity;
    root["scale"] = scale;
    root["anchor"] = anchor;
    root["display"] = display;
    root["mixing"] = mixing;
    root["waveform"] = waveform;
    root["scale_x"] = scale_x.JsonValue();
    root["scale_y"] = scale_y.JsonValue();
    root["location_x"] = location_x.JsonValue();
    root["location_y"] = location_y.JsonValue();
    root["alpha"] = alpha.JsonValue();
    root["rotation"] = rotation.JsonValue();
    root["time"] = time.JsonValue();
    root["volume"] = volume.JsonValue();
    root["wave_color"] = wave_color.JsonValue();
    root["shear_x"] = shear_x.JsonValue();
    root["shear_y"] = shear_y.JsonValue();
    root["origin_x"] = origin_x.JsonValue();
    root["origin_y"] = origin_y.JsonValue();
    root["channel_filter"] = channel_filter.JsonValue();
    root["channel_mapping"] = channel_mapping.JsonValue();
    root["has_audio"] = has_audio.JsonValue();
    root["has_video"] = has_video.JsonValue();
    root["perspective_c1_x"] = perspective_c1_x.JsonValue();
    root["perspective_c1_y"] = perspective_c1_y.JsonValue();
    root["perspective_c2_x"] = perspective_c2_x.JsonValue();
    root["perspective_c2_y"] = perspective_c2_y.JsonValue();
    root["perspective_c3_x"] = perspective_c3_x.JsonValue();
    root["perspective_c3_y"] = perspective_c3_y.JsonValue();
    root["perspective_c4_x"] = perspective_c4_x.JsonValue();
    root["perspective_c4_y"] = perspective_c4_y.JsonValue();

    // Add array of effects
    root["effects"] = Json::Value(Json::arrayValue);

    // loop through effects
    for (auto existing_effect : effects)
    {
        root["effects"].append(existing_effect->JsonValue());
    }

    if (reader)
        root["reader"] = reader->JsonValue();
    else
        root["reader"] = Json::Value(Json::objectValue);

    // return JsonValue
    return root;
}

// Load JSON string into this object
void Clip::SetJson(const std::string value) {

    // Parse JSON string into JSON objects
    try
    {
        const Json::Value root = openshot::stringToJson(value);
        // Set all values that match
        SetJsonValue(root);
    }
    catch (const std::exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
    }
}
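
// Round-trip sketch (hypothetical edit): Json() and SetJson() are symmetric,
// so a clip can be serialized, modified as text, and loaded back:
//
//     std::string j = clip.Json();
//     // ... tweak "scale", "gravity", or any keyframe in the JSON text ...
//     clip.SetJson(j);   // throws openshot::InvalidJSON on malformed input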

// Load Json::Value into this object
void Clip::SetJsonValue(const Json::Value root) {

    // Set parent data
    ClipBase::SetJsonValue(root);

    // Set data from Json (if key is found)
    if (!root["parentObjectId"].isNull()){
        parentObjectId = root["parentObjectId"].asString();
        if (parentObjectId.size() > 0 && parentObjectId != ""){
            AttachToObject(parentObjectId);
        } else{
            parentTrackedObject = nullptr;
            parentClipObject = NULL;
        }
    }
    if (!root["gravity"].isNull())
        gravity = (GravityType) root["gravity"].asInt();
    if (!root["scale"].isNull())
        scale = (ScaleType) root["scale"].asInt();
    if (!root["anchor"].isNull())
        anchor = (AnchorType) root["anchor"].asInt();
    if (!root["display"].isNull())
        display = (FrameDisplayType) root["display"].asInt();
    if (!root["mixing"].isNull())
        mixing = (VolumeMixType) root["mixing"].asInt();
    if (!root["waveform"].isNull())
        waveform = root["waveform"].asBool();
    if (!root["scale_x"].isNull())
        scale_x.SetJsonValue(root["scale_x"]);
    if (!root["scale_y"].isNull())
        scale_y.SetJsonValue(root["scale_y"]);
    if (!root["location_x"].isNull())
        location_x.SetJsonValue(root["location_x"]);
    if (!root["location_y"].isNull())
        location_y.SetJsonValue(root["location_y"]);
    if (!root["alpha"].isNull())
        alpha.SetJsonValue(root["alpha"]);
    if (!root["rotation"].isNull())
        rotation.SetJsonValue(root["rotation"]);
    if (!root["time"].isNull())
        time.SetJsonValue(root["time"]);
    if (!root["volume"].isNull())
        volume.SetJsonValue(root["volume"]);
    if (!root["wave_color"].isNull())
        wave_color.SetJsonValue(root["wave_color"]);
    if (!root["shear_x"].isNull())
        shear_x.SetJsonValue(root["shear_x"]);
    if (!root["shear_y"].isNull())
        shear_y.SetJsonValue(root["shear_y"]);
    if (!root["origin_x"].isNull())
        origin_x.SetJsonValue(root["origin_x"]);
    if (!root["origin_y"].isNull())
        origin_y.SetJsonValue(root["origin_y"]);
    if (!root["channel_filter"].isNull())
        channel_filter.SetJsonValue(root["channel_filter"]);
    if (!root["channel_mapping"].isNull())
        channel_mapping.SetJsonValue(root["channel_mapping"]);
    if (!root["has_audio"].isNull())
        has_audio.SetJsonValue(root["has_audio"]);
    if (!root["has_video"].isNull())
        has_video.SetJsonValue(root["has_video"]);
    if (!root["perspective_c1_x"].isNull())
        perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
    if (!root["perspective_c1_y"].isNull())
        perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
    if (!root["perspective_c2_x"].isNull())
        perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
    if (!root["perspective_c2_y"].isNull())
        perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
    if (!root["perspective_c3_x"].isNull())
        perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
    if (!root["perspective_c3_y"].isNull())
        perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
    if (!root["perspective_c4_x"].isNull())
        perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
    if (!root["perspective_c4_y"].isNull())
        perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
    if (!root["effects"].isNull()) {

        // Clear existing effects
        effects.clear();

        // loop through effects
        for (const auto existing_effect : root["effects"]) {
            // Create Effect
            EffectBase *e = NULL;
            if (!existing_effect["type"].isNull()) {

                // Create instance of effect
                if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {

                    // Load Json into Effect
                    e->SetJsonValue(existing_effect);

                    // Add Effect to Timeline
                    AddEffect(e);
                }
            }
        }
    }
    if (!root["reader"].isNull()) // does Json contain a reader?
    {
        if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
        {
            // Close previous reader (if any)
            bool already_open = false;
            if (reader)
            {
                // Track if reader was open
                already_open = reader->IsOpen();

                // Close and delete existing allocated reader (if any)
                Reader(NULL);
            }

            // Create new reader (and load properties)
            std::string type = root["reader"]["type"].asString();

            if (type == "FFmpegReader") {

                // Create new reader
                reader = new openshot::FFmpegReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

            } else if (type == "QtImageReader") {

                // Create new reader
                reader = new openshot::QtImageReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

#ifdef USE_IMAGEMAGICK
            } else if (type == "ImageReader") {

                // Create new reader
                reader = new ImageReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

            } else if (type == "TextReader") {

                // Create new reader
                reader = new TextReader();
                reader->SetJsonValue(root["reader"]);
#endif

            } else if (type == "ChunkReader") {

                // Create new reader
                reader = new openshot::ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
                reader->SetJsonValue(root["reader"]);

            } else if (type == "DummyReader") {

                // Create new reader
                reader = new openshot::DummyReader();
                reader->SetJsonValue(root["reader"]);

            } else if (type == "Timeline") {

                // Create new reader (always load from file again)
                // This prevents FrameMappers from being loaded on accident
                reader = new openshot::Timeline(root["reader"]["path"].asString(), true);
            }

            // mark as managed reader and set parent
            if (reader) {
                reader->ParentClip(this);
                allocated_reader = reader;
            }

            // Re-Open reader (if needed, and if a known reader type was created)
            if (already_open && reader)
                reader->Open();
        }
    }
}

// Sort effects by order
void Clip::sort_effects()
{
    // sort effects
    effects.sort(CompareClipEffects());
}

// Add an effect to the clip
void Clip::AddEffect(EffectBase* effect)
{
    // Set parent clip pointer
    effect->ParentClip(this);

    // Add effect to list
    effects.push_back(effect);

    // Sort effects
    sort_effects();

    // Get the parent timeline of this clip
    Timeline* parentTimeline = (Timeline *) ParentTimeline();

    if (parentTimeline)
        effect->ParentTimeline(parentTimeline);

#ifdef USE_OPENCV
    // Add Tracked Object to Timeline
    if (effect->info.has_tracked_object){

        // Check if this clip has a parent timeline
        if (parentTimeline){

            effect->ParentTimeline(parentTimeline);

            // Iterate through effect's vector of Tracked Objects
            for (auto const& trackedObject : effect->trackedObjects){

                // Cast the Tracked Object as TrackedObjectBBox
                std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);

                // Set the Tracked Object's parent clip to this
                trackedObjectBBox->ParentClip(this);

                // Add the Tracked Object to the timeline
                parentTimeline->AddTrackedObject(trackedObjectBBox);
            }
        }
    }
#endif
}

// Remove an effect from the clip
void Clip::RemoveEffect(EffectBase* effect)
{
    effects.remove(effect);
}
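
// Usage sketch ("openshot::Blur" is one example effect; any EffectBase
// subclass works the same way):
//
//     auto *blur = new openshot::Blur();
//     clip.AddEffect(blur);      // re-sorted into the effect stack by order
//     clip.RemoveEffect(blur);   // detaches only; the pointer is not deleted here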

// Apply effects to the source frame (if any)
void Clip::apply_effects(std::shared_ptr<Frame> frame)
{
    // Find Effects at this position and layer
    for (auto effect : effects)
    {
        // Apply the effect to this frame
        frame = effect->GetFrame(frame, frame->number);

    } // end effect loop
}

// Compare 2 floating point numbers for equality
bool Clip::isEqual(double a, double b)
{
    return fabs(a - b) < 0.000001;
}

// Apply keyframes to the source frame (if any)
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<QImage> background_canvas) {
    // Skip out if video was disabled or only an audio frame (no visualisation in use)
    if (!Waveform() && !Reader()->info.has_video) {
        // Skip the rest of the image processing for performance reasons
        return;
    }

    // Get image from clip
    std::shared_ptr<QImage> source_image = frame->GetImage();

    /* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
    if (Waveform())
    {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod(
            "Clip::get_transform (Generate Waveform Image)",
            "frame->number", frame->number,
            "Waveform()", Waveform(),
            "background_canvas->width()", background_canvas->width(),
            "background_canvas->height()", background_canvas->height());

        // Get the color of the waveform
        int red = wave_color.red.GetInt(frame->number);
        int green = wave_color.green.GetInt(frame->number);
        int blue = wave_color.blue.GetInt(frame->number);
        int alpha = wave_color.alpha.GetInt(frame->number);

        // Generate Waveform Dynamically (the size of the timeline)
        source_image = frame->GetWaveform(background_canvas->width(), background_canvas->height(), red, green, blue, alpha);
        frame->AddImage(source_image);
    }

    // Get transform from clip's keyframes
    QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::ApplyKeyframes (Transform: Composite Image Layer: Prepare)",
        "frame->number", frame->number,
        "background_canvas->width()", background_canvas->width(),
        "background_canvas->height()", background_canvas->height());

    // Load timeline's new frame image into a QPainter
    QPainter painter(background_canvas.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

    // Apply transform (translate, rotate, scale)
    painter.setTransform(transform);

    // Composite a new layer onto the image
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *source_image);

    if (timeline) {
        Timeline *t = (Timeline *) timeline;

        // Draw frame #'s on top of image (if needed)
        if (display != FRAME_DISPLAY_NONE) {
            std::stringstream frame_number_str;
            switch (display) {
                case (FRAME_DISPLAY_NONE):
                    // This is only here to prevent unused-enum warnings
                    break;

                case (FRAME_DISPLAY_CLIP):
                    frame_number_str << frame->number;
                    break;

                case (FRAME_DISPLAY_TIMELINE):
                    frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number;
                    break;

                case (FRAME_DISPLAY_BOTH):
                    frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
                    break;
            }

            // Draw frame number on top of image
            painter.setPen(QColor("#ffffff"));
            painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
        }
    }
    painter.end();

    // Add new QImage to frame
    frame->AddImage(background_canvas);
}
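
// Worked example for the frame-number overlay above (assumed values): with
// Position() = 10.0s, Start() = 2.0s, a 24 fps timeline, and clip frame 1,
// FRAME_DISPLAY_TIMELINE draws round((10.0 - 2.0) * 24) + 1 = 193.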
1353 
1354 // Apply keyframes to the source frame (if any)
1355 QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
1356 {
1357  // Get image from clip
1358  std::shared_ptr<QImage> source_image = frame->GetImage();
1359 
1360  /* ALPHA & OPACITY */
1361  if (alpha.GetValue(frame->number) != 1.0)
1362  {
1363  float alpha_value = alpha.GetValue(frame->number);
1364 
1365  // Get source image's pixels
1366  unsigned char *pixels = source_image->bits();
1367 
1368  // Loop through pixels
1369  for (int pixel = 0, byte_index=0; pixel < source_image->width() * source_image->height(); pixel++, byte_index+=4)
1370  {
1371  // Apply alpha to pixel values (since we use a premultiplied value, we must
1372  // multiply the alpha with all colors).
1373  pixels[byte_index + 0] *= alpha_value;
1374  pixels[byte_index + 1] *= alpha_value;
1375  pixels[byte_index + 2] *= alpha_value;
1376  pixels[byte_index + 3] *= alpha_value;
1377  }
1378 
1379  // Debug output
1381  "Clip::get_transform (Set Alpha & Opacity)",
1382  "alpha_value", alpha_value,
1383  "frame->number", frame->number);
1384  }
1385 
1386  /* RESIZE SOURCE IMAGE - based on scale type */
1387  QSize source_size = source_image->size();
1388 
1389  // Apply stretch scale to correctly fit the bounding-box
1390  if (parentTrackedObject){
1391  scale = SCALE_STRETCH;
1392  }
1393 
1394  switch (scale)
1395  {
1396  case (SCALE_FIT): {
1397  source_size.scale(width, height, Qt::KeepAspectRatio);
1398 
1399  // Debug output
1401  "Clip::get_transform (Scale: SCALE_FIT)",
1402  "frame->number", frame->number,
1403  "source_width", source_size.width(),
1404  "source_height", source_size.height());
1405  break;
1406  }
1407  case (SCALE_STRETCH): {
1408  source_size.scale(width, height, Qt::IgnoreAspectRatio);
1409 
1410  // Debug output
1412  "Clip::get_transform (Scale: SCALE_STRETCH)",
1413  "frame->number", frame->number,
1414  "source_width", source_size.width(),
1415  "source_height", source_size.height());
1416  break;
1417  }
1418  case (SCALE_CROP): {
1419  source_size.scale(width, height, Qt::KeepAspectRatioByExpanding);
1420 
1421  // Debug output
1423  "Clip::get_transform (Scale: SCALE_CROP)",
1424  "frame->number", frame->number,
1425  "source_width", source_size.width(),
1426  "source_height", source_size.height());
1427  break;
1428  }
1429  case (SCALE_NONE): {
1430  // Image is already the original size (i.e. no scaling mode) relative
1431  // to the preview window size (i.e. timeline / preview ratio). No further
1432  // scaling is needed here.
1433  // Debug output
1435  "Clip::get_transform (Scale: SCALE_NONE)",
1436  "frame->number", frame->number,
1437  "source_width", source_size.width(),
1438  "source_height", source_size.height());
1439  break;
1440  }
1441  }
1442 
1443  // Initialize parent object's properties (Clip or Tracked Object)
1444  float parentObject_location_x = 0.0;
1445  float parentObject_location_y = 0.0;
1446  float parentObject_scale_x = 1.0;
1447  float parentObject_scale_y = 1.0;
1448  float parentObject_shear_x = 0.0;
1449  float parentObject_shear_y = 0.0;
1450  float parentObject_rotation = 0.0;
1451 
1452  // Get the parentClipObject properties
1453  if (parentClipObject){
1454 
1455  // Convert Clip's frame position to Timeline's frame position
1456  long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
1457  long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
1458  double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;
1459 
1460  // Get parent object's properties (Clip)
1461  parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
1462  parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
1463  parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
1464  parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
1465  parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
1466  parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
1467  parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);
1468  }
1469 
1470  // Get the parentTrackedObject properties
1471  if (parentTrackedObject){
1472 
1473  // Convert Clip's frame position to Timeline's frame position
1474  long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
1475  long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
1476  double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;
1477 
1478  // Get parentTrackedObject's parent clip's properties
1479  std::map<std::string, float> trackedObjectParentClipProperties = parentTrackedObject->GetParentClipProperties(timeline_frame_number);
1480 
1481  // Get the attached object's parent clip's properties
1482  if (!trackedObjectParentClipProperties.empty())
1483  {
1484  // Get parent object's properties (Tracked Object)
1485  float parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];
1486 
1487  // Access the parentTrackedObject's properties
1488  std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);
1489 
1490  // Get the Tracked Object's properties and correct them by the clip's reference system
1491  parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["location_x"];
1492  parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["location_y"];
1493  parentObject_scale_x = trackedObjectProperties["w"]*trackedObjectProperties["sx"];
1494  parentObject_scale_y = trackedObjectProperties["h"]*trackedObjectProperties["sy"];
1495  parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["rotation"];
1496  }
1497  else
1498  {
1499  // Access the parentTrackedObject's properties
1500  std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(timeline_frame_number);
1501 
1502  // Get the Tracked Object's properties and correct them by the clip's reference system
1503  parentObject_location_x = trackedObjectProperties["cx"] - 0.5;
1504  parentObject_location_y = trackedObjectProperties["cy"] - 0.5;
1505  parentObject_scale_x = trackedObjectProperties["w"]*trackedObjectProperties["sx"];
1506  parentObject_scale_y = trackedObjectProperties["h"]*trackedObjectProperties["sy"];
1507  parentObject_rotation = trackedObjectProperties["r"];
1508  }
1509  }
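 // Note: the tracked box center (cx, cy) is normalized to the canvas
 // (0.0 to 1.0), so subtracting 0.5 converts it into an offset from the
 // canvas center that can be added to the clip's own location_x/location_y
 // values.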
1510 
1511  /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
1512  float x = 0.0; // left
1513  float y = 0.0; // top
1514 
1515  // Adjust size for scale x and scale y
1516  float sx = scale_x.GetValue(frame->number); // percentage X scale
1517  float sy = scale_y.GetValue(frame->number); // percentage Y scale
1518 
1519  // Multiply the clip's scale by the parent object's scale
1520  if (parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0) {
1521  sx *= parentObject_scale_x;
1522  sy *= parentObject_scale_y;
1523  }
1524 
1525  float scaled_source_width = source_size.width() * sx;
1526  float scaled_source_height = source_size.height() * sy;
1527 
1528  switch (gravity)
1529  {
1530  case (GRAVITY_TOP_LEFT):
1531  // This is only here to prevent unused-enum warnings
1532  break;
1533  case (GRAVITY_TOP):
1534  x = (width - scaled_source_width) / 2.0; // center
1535  break;
1536  case (GRAVITY_TOP_RIGHT):
1537  x = width - scaled_source_width; // right
1538  break;
1539  case (GRAVITY_LEFT):
1540  y = (height - scaled_source_height) / 2.0; // center
1541  break;
1542  case (GRAVITY_CENTER):
1543  x = (width - scaled_source_width) / 2.0; // center
1544  y = (height - scaled_source_height) / 2.0; // center
1545  break;
1546  case (GRAVITY_RIGHT):
1547  x = width - scaled_source_width; // right
1548  y = (height - scaled_source_height) / 2.0; // center
1549  break;
1550  case (GRAVITY_BOTTOM_LEFT):
1551  y = (height - scaled_source_height); // bottom
1552  break;
1553  case (GRAVITY_BOTTOM):
1554  x = (width - scaled_source_width) / 2.0; // center
1555  y = (height - scaled_source_height); // bottom
1556  break;
1557  case (GRAVITY_BOTTOM_RIGHT):
1558  x = width - scaled_source_width; // right
1559  y = (height - scaled_source_height); // bottom
1560  break;
1561  }
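 // Example (illustrative values): on a 1920x1080 canvas with a 960x540
 // scaled source, GRAVITY_CENTER places the clip at x = (1920 - 960) / 2 = 480
 // and y = (1080 - 540) / 2 = 270.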
1562 
1563  // Debug output
1564  ZmqLogger::Instance()->AppendDebugMethod(
1565  "Clip::get_transform (Gravity)",
1566  "frame->number", frame->number,
1567  "source_clip->gravity", gravity,
1568  "scaled_source_width", scaled_source_width,
1569  "scaled_source_height", scaled_source_height);
1570 
1571  QTransform transform;
1572 
1573  /* LOCATION, ROTATION, AND SCALE */
1574  float r = rotation.GetValue(frame->number) + parentObject_rotation; // rotate in degrees
1575  x += (width * (location_x.GetValue(frame->number) + parentObject_location_x)); // move in percentage of final width
1576  y += (height * (location_y.GetValue(frame->number) + parentObject_location_y)); // move in percentage of final height
1577  float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
1578  float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;
1579  float origin_x_value = origin_x.GetValue(frame->number);
1580  float origin_y_value = origin_y.GetValue(frame->number);
1581 
1582  // Transform source image (if needed)
1583  ZmqLogger::Instance()->AppendDebugMethod(
1584  "Clip::get_transform (Build QTransform - if needed)",
1585  "frame->number", frame->number,
1586  "x", x, "y", y,
1587  "r", r,
1588  "sx", sx, "sy", sy);
1589 
1590  if (!isEqual(x, 0) || !isEqual(y, 0)) {
1591  // TRANSLATE/MOVE CLIP
1592  transform.translate(x, y);
1593  }
1594  if (!isEqual(r, 0) || !isEqual(shear_x_value, 0) || !isEqual(shear_y_value, 0)) {
1595  // ROTATE CLIP (around origin_x, origin_y)
1596  float origin_x_offset = (scaled_source_width * origin_x_value);
1597  float origin_y_offset = (scaled_source_height * origin_y_value);
1598  transform.translate(origin_x_offset, origin_y_offset);
1599  transform.rotate(r);
1600  transform.shear(shear_x_value, shear_y_value);
1601  transform.translate(-origin_x_offset,-origin_y_offset);
1602  }
1603  // SCALE CLIP (if needed)
1604  float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
1605  float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
1606  if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
1607  transform.scale(source_width_scale, source_height_scale);
1608  }
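 // source_size already reflects the SCALE_* mode, so this ratio folds the
 // mode and the scale curves into one factor. Example (illustrative values):
 // a 1000px-wide image fit to 500px with sx = 2.0 gives
 // source_width_scale = (500 / 1000) * 2.0 = 1.0, i.e. no scaling needed.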
1609 
1610  return transform;
1611 }
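
The transform above is composed in a fixed order: translate to the gravity-adjusted position, rotate and shear around the origin point, then scale the raw source image to its target size. The following is a minimal, self-contained sketch of that same QTransform composition (illustrative only, with hypothetical values; it is not part of Clip.cpp):

#include <QTransform>
#include <QPointF>
#include <iostream>

int main() {
    // Hypothetical values standing in for the keyframe results in get_transform()
    float x = 480.0f, y = 270.0f;            // gravity + location offset (pixels)
    float r = 45.0f;                         // rotation (degrees)
    float shear_x_value = 0.0f, shear_y_value = 0.0f;
    float origin_x_offset = 480.0f;          // rotation origin (scaled-source pixels)
    float origin_y_offset = 270.0f;
    float width_scale = 0.5f, height_scale = 0.5f;

    QTransform transform;
    transform.translate(x, y);                               // 1) move clip
    transform.translate(origin_x_offset, origin_y_offset);   // 2) rotate and
    transform.rotate(r);                                     //    shear around
    transform.shear(shear_x_value, shear_y_value);           //    the origin
    transform.translate(-origin_x_offset, -origin_y_offset);
    transform.scale(width_scale, height_scale);              // 3) resize source

    // Map the source image's top-left corner into canvas coordinates
    QPointF corner = transform.map(QPointF(0.0, 0.0));
    std::cout << "top-left maps to (" << corner.x() << ", " << corner.y() << ")\n";
    return 0;
}

Note that Clip::get_transform only applies each step when it is needed: the translate is skipped when x and y are both 0, the rotate/shear block when r and both shear values are 0, and the final scale when both combined factors are 1.0.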
Header file for AudioResampler class.
Header file for ChunkReader class.
Header file for Clip class.
Header file for DummyReader class.
Header file for all Exception classes.
Header file for FFmpegReader class.
Header file for the FrameMapper class.
Header file for ImageReader class.
Header file for MagickUtilities (IM6/IM7 compatibility overlay)
Header file for QtImageReader class.
Header file for TextReader class.
Header file for Timeline class.
Header file for ZeroMQ-based Logger class.
This class is used to resample audio data for many sequential frames.
void SetBuffer(juce::AudioBuffer< float > *new_buffer, double sample_rate, double new_sample_rate)
Sets the audio buffer and key settings.
juce::AudioBuffer< float > * GetResampledBuffer()
Get the resampled audio buffer.
This class reads a special chunk-formatted file, which can be easily shared in a distributed environm...
Definition: ChunkReader.h:79
float Start() const
Get start position (in seconds) of clip (trim start of video)
Definition: ClipBase.h:88
float start
The position in seconds to start playing (used to trim the beginning of a clip)
Definition: ClipBase.h:38
float Duration() const
Get the length of this clip (in seconds)
Definition: ClipBase.h:90
virtual float End() const
Get end position (in seconds) of clip (trim end of video)
Definition: ClipBase.h:89
std::string Id() const
Get the Id of this clip object.
Definition: ClipBase.h:85
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Definition: ClipBase.cpp:64
Json::Value add_property_choice_json(std::string name, int value, int selected_value) const
Generate JSON choice for a property (dropdown properties)
Definition: ClipBase.cpp:132
int Layer() const
Get layer of clip on timeline (lower number is covered by higher numbers)
Definition: ClipBase.h:87
openshot::TimelineBase * ParentTimeline()
Get the associated Timeline pointer (if any)
Definition: ClipBase.h:91
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
Definition: ClipBase.cpp:80
openshot::TimelineBase * timeline
Pointer to the parent timeline instance (if any)
Definition: ClipBase.h:41
float Position() const
Get position on timeline (in seconds)
Definition: ClipBase.h:86
float end
The position in seconds to end playing (used to trim the ending of a clip)
Definition: ClipBase.h:39
std::string previous_properties
This string contains the previous JSON properties.
Definition: ClipBase.h:40
Json::Value add_property_json(std::string name, float value, std::string type, std::string memo, const Keyframe *keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame) const
Generate JSON for a property.
Definition: ClipBase.cpp:96
This class represents a clip (used to arrange readers on the timeline)
Definition: Clip.h:90
void SetAttachedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Set the pointer to the trackedObject this clip is attached to.
Definition: Clip.cpp:257
openshot::Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
Definition: Clip.h:286
openshot::Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1)
Definition: Clip.h:289
openshot::Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
Definition: Clip.h:294
openshot::Keyframe perspective_c4_x
Curves representing X for coordinate 4.
Definition: Clip.h:313
openshot::AnchorType anchor
The anchor determines what parent a clip should snap to.
Definition: Clip.h:157
openshot::VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
Definition: Clip.h:159
void Open() override
Open the internal reader.
Definition: Clip.cpp:315
openshot::Keyframe rotation
Curve representing the rotation (0 to 360)
Definition: Clip.h:293
openshot::Keyframe channel_filter
A number representing an audio channel to filter (clears all other channels)
Definition: Clip.h:317
openshot::FrameDisplayType display
The format to display the frame number (if any)
Definition: Clip.h:158
void init_reader_rotation()
Update default rotation from reader.
Definition: Clip.cpp:112
Clip()
Default Constructor.
Definition: Clip.cpp:132
openshot::Keyframe perspective_c1_x
Curves representing X for coordinate 1.
Definition: Clip.h:307
void AttachToObject(std::string object_id)
Attach clip to Tracked Object or to another Clip.
Definition: Clip.cpp:235
std::string Json() const override
Generate JSON string of this object.
Definition: Clip.cpp:783
openshot::EffectBase * GetEffect(const std::string &id)
Look up an effect by ID.
Definition: Clip.cpp:470
void SetJsonValue(const Json::Value root) override
Load Json::Value into this object.
Definition: Clip.cpp:1013
std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number) override
Get an openshot::Frame object for a specific frame number of this clip. The image size and number of ...
Definition: Clip.cpp:378
openshot::Keyframe alpha
Curve representing the alpha (1 to 0)
Definition: Clip.h:290
openshot::Keyframe has_audio
An optional override to determine if this clip has audio (-1=undefined, 0=no, 1=yes)
Definition: Clip.h:321
openshot::Keyframe perspective_c3_x
Curves representing X for coordinate 3.
Definition: Clip.h:311
void init_reader_settings()
Init reader info details.
Definition: Clip.cpp:101
openshot::Keyframe perspective_c1_y
Curves representing Y for coordinate 1.
Definition: Clip.h:308
Json::Value JsonValue() const override
Generate Json::Value for this object.
Definition: Clip.cpp:940
void SetAttachedClip(Clip *clipObject)
Set the pointer to the clip this clip is attached to.
Definition: Clip.cpp:263
openshot::Keyframe perspective_c4_y
Curves representing Y for coordinate 4.
Definition: Clip.h:314
openshot::Keyframe time
Curve representing the frames over time to play (used for speed and direction of video)
Definition: Clip.h:300
bool Waveform()
Get the waveform property of this clip.
Definition: Clip.h:282
openshot::GravityType gravity
The gravity of a clip determines where it snaps to its parent.
Definition: Clip.h:155
openshot::Keyframe perspective_c3_y
Curves representing Y for coordinate 3.
Definition: Clip.h:312
void AddEffect(openshot::EffectBase *effect)
Add an effect to the clip.
Definition: Clip.cpp:1198
void Close() override
Close the internal reader.
Definition: Clip.cpp:336
virtual ~Clip()
Destructor.
Definition: Clip.cpp:218
openshot::Keyframe perspective_c2_y
Curves representing Y for coordinate 2.
Definition: Clip.h:310
openshot::Keyframe volume
Curve representing the volume (0 to 1)
Definition: Clip.h:301
openshot::Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
Definition: Clip.h:295
openshot::Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1)
Definition: Clip.h:287
float End() const override
Get end position (in seconds) of clip (trim end of video), which can be affected by the time curve.
Definition: Clip.cpp:351
openshot::ReaderBase * Reader()
Get the current reader.
Definition: Clip.cpp:305
void RemoveEffect(openshot::EffectBase *effect)
Remove an effect from the clip.
Definition: Clip.cpp:1242
openshot::Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel)
Definition: Clip.h:318
openshot::Keyframe has_video
An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes)
Definition: Clip.h:322
std::string PropertiesJSON(int64_t requested_frame) const override
Definition: Clip.cpp:790
openshot::Color wave_color
Curve representing the color of the audio wave form.
Definition: Clip.h:304
void init_settings()
Init default settings for a clip.
Definition: Clip.cpp:36
openshot::Keyframe perspective_c2_x
Curves representing X for coordinate 2.
Definition: Clip.h:309
openshot::ScaleType scale
The scale determines how a clip should be resized to fit its parent.
Definition: Clip.h:156
openshot::Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1)
Definition: Clip.h:288
openshot::Keyframe origin_x
Curve representing X origin point (0.0=0% (left), 1.0=100% (right))
Definition: Clip.h:296
std::recursive_mutex getFrameMutex
Mutex for multiple threads.
Definition: Clip.h:93
void SetJson(const std::string value) override
Load JSON string into this object.
Definition: Clip.cpp:996
openshot::Keyframe origin_y
Curve representing Y origin point (0.0=0% (top), 1.0=100% (bottom))
Definition: Clip.h:297
This class represents a color (used on the timeline and clips)
Definition: Color.h:27
openshot::Keyframe blue
Curve representing the blue value (0 - 255)
Definition: Color.h:32
openshot::Keyframe red
Curve representing the red value (0 - 255)
Definition: Color.h:30
openshot::Keyframe green
Curve representing the green value (0 - 255)
Definition: Color.h:31
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: Color.cpp:117
openshot::Keyframe alpha
Curve representing the alpha value (0 - 255)
Definition: Color.h:33
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition: Color.cpp:86
This class is used as a simple, dummy reader, which can be very useful when writing unit tests....
Definition: DummyReader.h:86
This abstract class is the base class, used by all effects in libopenshot.
Definition: EffectBase.h:53
openshot::ClipBase * ParentClip()
Parent clip object of this effect (which can be unparented and NULL)
Definition: EffectBase.cpp:173
virtual void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: EffectBase.cpp:112
EffectInfoStruct info
Information about the current effect.
Definition: EffectBase.h:69
std::map< int, std::shared_ptr< openshot::TrackedObjectBase > > trackedObjects
Map of Tracked Object's by their indices (used by Effects that track objects on clips)
Definition: EffectBase.h:66
This class returns a listing of all effects supported by libopenshot.
Definition: EffectInfo.h:29
This class uses the FFmpeg libraries, to open video files and audio files, and return openshot::Frame...
Definition: FFmpegReader.h:113
int num
Numerator for the fraction.
Definition: Fraction.h:32
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:35
double ToDouble() const
Return this fraction as a double (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:40
int den
Denominator for the fraction.
Definition: Fraction.h:33
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
Definition: FrameMapper.h:120
ReaderBase * Reader()
Get the current reader.
Definition: FrameMapper.cpp:61
int GetSamplesPerFrame(openshot::Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Definition: Frame.cpp:534
This class uses the ImageMagick++ libraries, to open image files, and return openshot::Frame objects ...
Definition: ImageReader.h:56
Exception for invalid JSON.
Definition: Exceptions.h:218
A Keyframe is a collection of Point instances, which is used to vary a number or property over time.
Definition: KeyFrame.h:54
int GetInt(int64_t index) const
Get the rounded INT value at a specific index.
Definition: KeyFrame.cpp:282
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: KeyFrame.cpp:358
double GetDelta(int64_t index) const
Get the change in Y value (from the previous Y value)
Definition: KeyFrame.cpp:482
int64_t GetLength() const
Definition: KeyFrame.cpp:500
Fraction GetRepeatFraction(int64_t index) const
Get the fraction that represents how many times this value is repeated in the curve.
Definition: KeyFrame.cpp:379
int64_t GetLong(int64_t index) const
Get the rounded LONG value at a specific index.
Definition: KeyFrame.cpp:287
double GetValue(int64_t index) const
Get the value at a specific index.
Definition: KeyFrame.cpp:258
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition: KeyFrame.cpp:325
bool IsIncreasing(int index) const
Get the direction of the curve at a specific index (increasing or decreasing)
Definition: KeyFrame.cpp:292
int64_t GetCount() const
Get the number of points in this keyframe curve.
Definition: KeyFrame.cpp:507
Exception for frames that are out of bounds.
Definition: Exceptions.h:301
This class uses the Qt library, to open image files, and return openshot::Frame objects containing th...
Definition: QtImageReader.h:75
This abstract class is the base class, used by all readers in libopenshot.
Definition: ReaderBase.h:76
virtual bool IsOpen()=0
Determine if reader is open or closed.
virtual std::string Name()=0
Return the type name of the class.
openshot::ReaderInfo info
Information about the current media file.
Definition: ReaderBase.h:88
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
Definition: ReaderBase.cpp:162
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Definition: ReaderBase.cpp:107
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t number)=0
virtual void Open()=0
Open the reader (and start consuming resources, such as images or video files)
openshot::ClipBase * ParentClip()
Parent clip object of this reader (which can be unparented and NULL)
Definition: ReaderBase.cpp:245
virtual void Close()=0
Close the reader (and any resources it was consuming)
Exception when a reader is closed, and a frame is requested.
Definition: Exceptions.h:364
This class uses the ImageMagick++ libraries, to create frames with "Text", and return openshot::Frame...
Definition: TextReader.h:63
This class represents a timeline.
Definition: Timeline.h:150
void AddTrackedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Add to the tracked_objects map a pointer to a tracked object (TrackedObjectBBox)
Definition: Timeline.cpp:223
std::shared_ptr< openshot::TrackedObjectBase > GetTrackedObject(std::string id) const
Return tracked object pointer by its id.
Definition: Timeline.cpp:241
openshot::Clip * GetClip(const std::string &id)
Look up a single clip by ID.
Definition: Timeline.cpp:408
std::shared_ptr< openshot::Frame > apply_effects(std::shared_ptr< openshot::Frame > frame, int64_t timeline_frame_number, int layer)
Apply global/timeline effects to the source frame (if any)
Definition: Timeline.cpp:526
void AppendDebugMethod(std::string method_name, std::string arg1_name="", float arg1_value=-1.0, std::string arg2_name="", float arg2_value=-1.0, std::string arg3_name="", float arg3_value=-1.0, std::string arg4_name="", float arg4_value=-1.0, std::string arg5_name="", float arg5_value=-1.0, std::string arg6_name="", float arg6_value=-1.0)
Append debug information.
Definition: ZmqLogger.cpp:173
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
Definition: ZmqLogger.cpp:35
This namespace is the default namespace for all code in the openshot library.
Definition: Compressor.h:29
AnchorType
This enumeration determines what parent a clip should be aligned to.
Definition: Enums.h:45
@ ANCHOR_CANVAS
Anchor the clip to the canvas.
Definition: Enums.h:46
ChunkVersion
This enumeration allows the user to choose which version of the chunk they would like (low,...
Definition: ChunkReader.h:50
GravityType
This enumeration determines how clips are aligned to their parent container.
Definition: Enums.h:22
@ GRAVITY_TOP_LEFT
Align clip to the top left of its parent.
Definition: Enums.h:23
@ GRAVITY_LEFT
Align clip to the left of its parent (middle aligned)
Definition: Enums.h:26
@ GRAVITY_TOP_RIGHT
Align clip to the top right of its parent.
Definition: Enums.h:25
@ GRAVITY_RIGHT
Align clip to the right of its parent (middle aligned)
Definition: Enums.h:28
@ GRAVITY_BOTTOM_LEFT
Align clip to the bottom left of its parent.
Definition: Enums.h:29
@ GRAVITY_BOTTOM
Align clip to the bottom center of its parent.
Definition: Enums.h:30
@ GRAVITY_TOP
Align clip to the top center of its parent.
Definition: Enums.h:24
@ GRAVITY_BOTTOM_RIGHT
Align clip to the bottom right of its parent.
Definition: Enums.h:31
@ GRAVITY_CENTER
Align clip to the center of its parent (middle aligned)
Definition: Enums.h:27
ScaleType
This enumeration determines how clips are scaled to fit their parent container.
Definition: Enums.h:36
@ SCALE_FIT
Scale the clip until either height or width fills the canvas (with no cropping)
Definition: Enums.h:38
@ SCALE_STRETCH
Scale the clip until both height and width fill the canvas (distort to fit)
Definition: Enums.h:39
@ SCALE_CROP
Scale the clip until both height and width fill the canvas (cropping the overlap)
Definition: Enums.h:37
@ SCALE_NONE
Do not scale the clip.
Definition: Enums.h:40
VolumeMixType
This enumeration determines the strategy when mixing audio with other clips.
Definition: Enums.h:61
@ VOLUME_MIX_AVERAGE
Evenly divide the overlapping clips' volume keyframes, so that the sum does not exceed 100%.
Definition: Enums.h:63
@ VOLUME_MIX_NONE
Do not apply any volume mixing adjustments. Just add the samples together.
Definition: Enums.h:62
@ VOLUME_MIX_REDUCE
Reduce volume by about 25%, and then mix (louder, but could cause pops if the sum exceeds 100%)
Definition: Enums.h:64
FrameDisplayType
This enumeration determines the display format of the clip's frame number (if any)....
Definition: Enums.h:52
@ FRAME_DISPLAY_CLIP
Display the clip's internal frame number.
Definition: Enums.h:54
@ FRAME_DISPLAY_TIMELINE
Display the timeline's frame number.
Definition: Enums.h:55
@ FRAME_DISPLAY_BOTH
Display both the clip's and timeline's frame number.
Definition: Enums.h:56
@ FRAME_DISPLAY_NONE
Do not display the frame number.
Definition: Enums.h:53
const Json::Value stringToJson(const std::string value)
Definition: Json.cpp:16
bool has_tracked_object
Determines if this effect tracks objects through the clip.
Definition: EffectBase.h:42
float duration
Length of time (in seconds)
Definition: ReaderBase.h:43
int width
The width of the video (in pixels)
Definition: ReaderBase.h:46
int channels
The number of audio channels used in the audio stream.
Definition: ReaderBase.h:61
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition: ReaderBase.h:48
int height
The height of the video (in pixels)
Definition: ReaderBase.h:45
int64_t video_length
The number of frames in the video stream.
Definition: ReaderBase.h:53
std::map< std::string, std::string > metadata
An optional map/dictionary of metadata for this reader.
Definition: ReaderBase.h:65
openshot::ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
Definition: ReaderBase.h:62
bool has_video
Determines if this file has a video stream.
Definition: ReaderBase.h:40
bool has_audio
Determines if this file has an audio stream.
Definition: ReaderBase.h:41
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition: ReaderBase.h:60
This struct contains info about the current Timeline clip instance.
Definition: TimelineBase.h:33
bool is_top_clip
Is clip on top (if overlapping another clip)
Definition: TimelineBase.h:34