ResidualVM logo ResidualVM website - Forums - Contact us BuildBot - Doxygen - Wiki curved edge

qt_decoder.cpp

Go to the documentation of this file.
00001 /* ScummVM - Graphic Adventure Engine
00002  *
00003  * ScummVM is the legal property of its developers, whose names
00004  * are too numerous to list here. Please refer to the COPYRIGHT
00005  * file distributed with this source distribution.
00006  *
00007  * This program is free software; you can redistribute it and/or
00008  * modify it under the terms of the GNU General Public License
00009  * as published by the Free Software Foundation; either version 2
00010  * of the License, or (at your option) any later version.
00011  *
00012  * This program is distributed in the hope that it will be useful,
00013  * but WITHOUT ANY WARRANTY; without even the implied warranty of
00014  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00015  * GNU General Public License for more details.
00016  *
00017  * You should have received a copy of the GNU General Public License
00018  * along with this program; if not, write to the Free Software
00019  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
00020  *
00021  */
00022 
00023 //
00024 // Partially based on ffmpeg code.
00025 //
00026 // Copyright (c) 2001 Fabrice Bellard.
00027 // First version by Francois Revol revol@free.fr
00028 // Seek function by Gael Chardon gael.dev@4now.net
00029 //
00030 
00031 #include "video/qt_decoder.h"
00032 
00033 #include "audio/audiostream.h"
00034 
00035 #include "common/debug.h"
00036 #include "common/memstream.h"
00037 #include "common/system.h"
00038 #include "common/textconsole.h"
00039 #include "common/util.h"
00040 
00041 // Video codecs
00042 #include "image/codecs/codec.h"
00043 
00044 namespace Video {
00045 
00047 // QuickTimeDecoder
00049 
00050 QuickTimeDecoder::QuickTimeDecoder() {
00051     _scaledSurface = 0;
00052     _width = _height = 0;
00053 }
00054 
// Tears down all decoder state (tracks, parser data, scaled surface) via close().
QuickTimeDecoder::~QuickTimeDecoder() {
	close();
}
00058 
00059 bool QuickTimeDecoder::loadFile(const Common::String &filename) {
00060     if (!Common::QuickTimeParser::parseFile(filename))
00061         return false;
00062 
00063     init();
00064     return true;
00065 }
00066 
00067 bool QuickTimeDecoder::loadStream(Common::SeekableReadStream *stream) {
00068     if (!Common::QuickTimeParser::parseStream(stream))
00069         return false;
00070 
00071     init();
00072     return true;
00073 }
00074 
00075 void QuickTimeDecoder::close() {
00076     VideoDecoder::close();
00077     Common::QuickTimeParser::close();
00078 
00079     if (_scaledSurface) {
00080         _scaledSurface->free();
00081         delete _scaledSurface;
00082         _scaledSurface = 0;
00083     }
00084 }
00085 
00086 const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() {
00087     const Graphics::Surface *frame = VideoDecoder::decodeNextFrame();
00088 
00089     // Update audio buffers too
00090     // (needs to be done after we find the next track)
00091     updateAudioBuffer();
00092 
00093     // We have to initialize the scaled surface
00094     if (frame && (_scaleFactorX != 1 || _scaleFactorY != 1)) {
00095         if (!_scaledSurface) {
00096             _scaledSurface = new Graphics::Surface();
00097             _scaledSurface->create(_width, _height, getPixelFormat());
00098         }
00099 
00100         scaleSurface(frame, _scaledSurface, _scaleFactorX, _scaleFactorY);
00101         return _scaledSurface;
00102     }
00103 
00104     return frame;
00105 }
00106 
00107 Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(Common::QuickTimeParser::Track *track, uint32 format, uint32 descSize) {
00108     if (track->codecType == CODEC_TYPE_VIDEO) {
00109         debug(0, "Video Codec FourCC: \'%s\'", tag2str(format));
00110 
00111         VideoSampleDesc *entry = new VideoSampleDesc(track, format);
00112 
00113         _fd->readUint16BE(); // version
00114         _fd->readUint16BE(); // revision level
00115         _fd->readUint32BE(); // vendor
00116         _fd->readUint32BE(); // temporal quality
00117         _fd->readUint32BE(); // spacial quality
00118 
00119         uint16 width = _fd->readUint16BE(); // width
00120         uint16 height = _fd->readUint16BE(); // height
00121 
00122         // The width is most likely invalid for entries after the first one
00123         // so only set the overall width if it is not zero here.
00124         if (width)
00125             track->width = width;
00126 
00127         if (height)
00128             track->height = height;
00129 
00130         _fd->readUint32BE(); // horiz resolution
00131         _fd->readUint32BE(); // vert resolution
00132         _fd->readUint32BE(); // data size, always 0
00133         _fd->readUint16BE(); // frames per samples
00134 
00135         byte codecName[32];
00136         _fd->read(codecName, 32); // codec name, pascal string (FIXME: true for mp4?)
00137         if (codecName[0] <= 31) {
00138             memcpy(entry->_codecName, &codecName[1], codecName[0]);
00139             entry->_codecName[codecName[0]] = 0;
00140         }
00141 
00142         entry->_bitsPerSample = _fd->readUint16BE(); // depth
00143         entry->_colorTableId = _fd->readUint16BE(); // colortable id
00144 
00145         // figure out the palette situation
00146         byte colorDepth = entry->_bitsPerSample & 0x1F;
00147         bool colorGreyscale = (entry->_bitsPerSample & 0x20) != 0;
00148 
00149         debug(0, "color depth: %d", colorDepth);
00150 
00151         // if the depth is 2, 4, or 8 bpp, file is palettized
00152         if (colorDepth == 2 || colorDepth == 4 || colorDepth == 8) {
00153             // Initialize the palette
00154             entry->_palette = new byte[256 * 3];
00155             memset(entry->_palette, 0, 256 * 3);
00156 
00157             if (colorGreyscale) {
00158                 debug(0, "Greyscale palette");
00159 
00160                 // compute the greyscale palette
00161                 uint16 colorCount = 1 << colorDepth;
00162                 int16 colorIndex = 255;
00163                 byte colorDec = 256 / (colorCount - 1);
00164                 for (byte j = 0; j < colorCount; j++) {
00165                     entry->_palette[j * 3] = entry->_palette[j * 3 + 1] = entry->_palette[j * 3 + 2] = colorIndex;
00166                     colorIndex -= colorDec;
00167                     if (colorIndex < 0)
00168                         colorIndex = 0;
00169                 }
00170             } else if (entry->_colorTableId & 0x08) {
00171                 // if flag bit 3 is set, use the default palette
00172                 //uint16 colorCount = 1 << colorDepth;
00173 
00174                 warning("Predefined palette! %dbpp", colorDepth);
00175             } else {
00176                 debug(0, "Palette from file");
00177 
00178                 // load the palette from the file
00179                 uint32 colorStart = _fd->readUint32BE();
00180                 /* uint16 colorCount = */ _fd->readUint16BE();
00181                 uint16 colorEnd = _fd->readUint16BE();
00182                 for (uint32 j = colorStart; j <= colorEnd; j++) {
00183                     // each R, G, or B component is 16 bits;
00184                     // only use the top 8 bits; skip alpha bytes
00185                     // up front
00186                     _fd->readByte();
00187                     _fd->readByte();
00188                     entry->_palette[j * 3] = _fd->readByte();
00189                     _fd->readByte();
00190                     entry->_palette[j * 3 + 1] = _fd->readByte();
00191                     _fd->readByte();
00192                     entry->_palette[j * 3 + 2] = _fd->readByte();
00193                     _fd->readByte();
00194                 }
00195             }
00196         }
00197 
00198         return entry;
00199     }
00200 
00201     // Pass it on up
00202     return Audio::QuickTimeAudioDecoder::readSampleDesc(track, format, descSize);
00203 }
00204 
00205 void QuickTimeDecoder::init() {
00206     Audio::QuickTimeAudioDecoder::init();
00207 
00208     // Initialize all the audio tracks
00209     for (uint32 i = 0; i < _audioTracks.size(); i++)
00210         addTrack(new AudioTrackHandler(this, _audioTracks[i]));
00211 
00212     // Initialize all the video tracks
00213     const Common::Array<Common::QuickTimeParser::Track *> &tracks = Common::QuickTimeParser::_tracks;
00214     for (uint32 i = 0; i < tracks.size(); i++) {
00215         if (tracks[i]->codecType == CODEC_TYPE_VIDEO) {
00216             for (uint32 j = 0; j < tracks[i]->sampleDescs.size(); j++)
00217                 ((VideoSampleDesc *)tracks[i]->sampleDescs[j])->initCodec();
00218 
00219             addTrack(new VideoTrackHandler(this, tracks[i]));
00220         }
00221     }
00222 
00223     // Prepare the first video track
00224     VideoTrackHandler *nextVideoTrack = (VideoTrackHandler *)findNextVideoTrack();
00225 
00226     if (nextVideoTrack) {
00227         if (_scaleFactorX != 1 || _scaleFactorY != 1) {
00228             // We have to take the scale into consideration when setting width/height
00229             _width = (nextVideoTrack->getScaledWidth() / _scaleFactorX).toInt();
00230             _height = (nextVideoTrack->getScaledHeight() / _scaleFactorY).toInt();
00231         } else {
00232             _width = nextVideoTrack->getWidth();
00233             _height = nextVideoTrack->getHeight();
00234         }
00235     }
00236 }
00237 
00238 void QuickTimeDecoder::updateAudioBuffer() {
00239     // Updates the audio buffers for all audio tracks
00240     for (TrackListIterator it = getTrackListBegin(); it != getTrackListEnd(); it++)
00241         if ((*it)->getTrackType() == VideoDecoder::Track::kTrackTypeAudio)
00242             ((AudioTrackHandler *)*it)->updateBuffer();
00243 }
00244 
00245 void QuickTimeDecoder::scaleSurface(const Graphics::Surface *src, Graphics::Surface *dst, const Common::Rational &scaleFactorX, const Common::Rational &scaleFactorY) {
00246     assert(src && dst);
00247 
00248     for (int32 j = 0; j < dst->h; j++)
00249         for (int32 k = 0; k < dst->w; k++)
00250             memcpy(dst->getBasePtr(k, j), src->getBasePtr((k * scaleFactorX).toInt() , (j * scaleFactorY).toInt()), src->format.bytesPerPixel);
00251 }
00252 
00253 QuickTimeDecoder::VideoSampleDesc::VideoSampleDesc(Common::QuickTimeParser::Track *parentTrack, uint32 codecTag) : Common::QuickTimeParser::SampleDesc(parentTrack, codecTag) {
00254     memset(_codecName, 0, 32);
00255     _colorTableId = 0;
00256     _palette = 0;
00257     _videoCodec = 0;
00258     _bitsPerSample = 0;
00259 }
00260 
QuickTimeDecoder::VideoSampleDesc::~VideoSampleDesc() {
	// _palette was allocated with new[], the codec is a single object
	delete[] _palette;
	delete _videoCodec;
}
00265 
// Instantiates the video codec matching this description's FourCC and the
// parent track's dimensions. _videoCodec may be left null for unsupported
// codecs; callers (see bufferNextFrame) check for that.
void QuickTimeDecoder::VideoSampleDesc::initCodec() {
	_videoCodec = Image::createQuickTimeCodec(_codecTag, _parentTrack->width, _parentTrack->height, _bitsPerSample & 0x1f);
}
00269 
// Wraps a parsed QuickTime audio track so the generic VideoDecoder track
// machinery can play it. The handler does not own the audio track.
QuickTimeDecoder::AudioTrackHandler::AudioTrackHandler(QuickTimeDecoder *decoder, QuickTimeAudioTrack *audioTrack) :
		SeekableAudioTrack(decoder->getSoundType()),
		_decoder(decoder),
		_audioTrack(audioTrack) {
}
00275 
00276 void QuickTimeDecoder::AudioTrackHandler::updateBuffer() {
00277     if (_decoder->endOfVideoTracks()) // If we have no video left (or no video), there's nothing to base our buffer against
00278         _audioTrack->queueRemainingAudio();
00279     else // Otherwise, queue enough to get us to the next frame plus another half second spare
00280         _audioTrack->queueAudio(Audio::Timestamp(_decoder->getTimeToNextFrame() + 500, 1000));
00281 }
00282 
Audio::SeekableAudioStream *QuickTimeDecoder::AudioTrackHandler::getSeekableAudioStream() const {
	// The QuickTime audio track itself doubles as the seekable stream
	return _audioTrack;
}
00286 
00287 QuickTimeDecoder::VideoTrackHandler::VideoTrackHandler(QuickTimeDecoder *decoder, Common::QuickTimeParser::Track *parent) : _decoder(decoder), _parent(parent) {
00288     checkEditListBounds();
00289 
00290     _curEdit = 0;
00291     enterNewEditList(false);
00292 
00293     _curFrame = -1;
00294     _durationOverride = -1;
00295     _scaledSurface = 0;
00296     _curPalette = 0;
00297     _dirtyPalette = false;
00298     _reversed = false;
00299     _forcedDitherPalette = 0;
00300     _ditherTable = 0;
00301     _ditherFrame = 0;
00302 }
00303 
void QuickTimeDecoder::VideoTrackHandler::checkEditListBounds() {
	// Check all the edit list entries are within the bounds of the media
	// In the Spanish version of Riven, the last edit of the video ogk.mov
	// ends one frame after the end of the media.

	uint32 offset = 0;
	// Media duration rescaled by the movie/media time scale ratio so it is
	// comparable with the edit entries below
	uint32 mediaDuration = _parent->mediaDuration * _decoder->_timeScale / _parent->timeScale;

	for (uint i = 0; i < _parent->editList.size(); i++) {
		EditListEntry &edit = _parent->editList[i];

		if (edit.mediaTime < 0) {
			offset += edit.trackDuration;
			continue; // Ignore empty edits
		}

		if ((uint32) edit.mediaTime > mediaDuration) {
			// Check if the edit starts after the end of the media
			// If so, mark it as empty so it is ignored
			edit.mediaTime = -1;
		} else if (edit.mediaTime + edit.trackDuration > mediaDuration) {
			// Check if the edit ends after the end of the media
			// If so, clip it so it fits in the media
			edit.trackDuration = mediaDuration - edit.mediaTime;
		}

		// Rebuild the edit's start offset as the sum of all preceding
		// (possibly clipped) durations
		edit.timeOffset = offset;
		offset += edit.trackDuration;
	}
}
00334 
00335 QuickTimeDecoder::VideoTrackHandler::~VideoTrackHandler() {
00336     if (_scaledSurface) {
00337         _scaledSurface->free();
00338         delete _scaledSurface;
00339     }
00340 
00341     delete[] _forcedDitherPalette;
00342     delete[] _ditherTable;
00343 
00344     if (_ditherFrame) {
00345         _ditherFrame->free();
00346         delete _ditherFrame;
00347     }
00348 }
00349 
00350 bool QuickTimeDecoder::VideoTrackHandler::endOfTrack() const {
00351     // A track is over when we've finished going through all edits
00352     return _reversed ? (_curEdit == 0 && _curFrame < 0) : atLastEdit();
00353 }
00354 
// Seeks the track to the requested time. Locates the edit containing that
// time, then steps frame-by-frame within the edit; any overshoot is stashed
// in _durationOverride so the following frame still lands on schedule.
bool QuickTimeDecoder::VideoTrackHandler::seek(const Audio::Timestamp &requestedTime) {
	// Find the edit containing the requested time (in movie time scale units)
	uint32 convertedFrames = requestedTime.convertToFramerate(_decoder->_timeScale).totalNumberOfFrames();
	for (_curEdit = 0; !atLastEdit(); _curEdit++)
		if (convertedFrames >= _parent->editList[_curEdit].timeOffset && convertedFrames < _parent->editList[_curEdit].timeOffset + _parent->editList[_curEdit].trackDuration)
			break;

	// If we did reach the end of the track, break out
	if (atLastEdit()) {
		// Call setReverse to set the position to the last frame of the last edit
		if (_reversed)
			setReverse(true);
		return true;
	}

	// If this track is in an empty edit, position us at the next non-empty
	// edit. There's nothing else to do after this.
	if (_parent->editList[_curEdit].mediaTime == -1) {
		while (!atLastEdit() && _parent->editList[_curEdit].mediaTime == -1)
			_curEdit++;

		if (!atLastEdit())
			enterNewEditList(true);

		return true;
	}

	enterNewEditList(false);

	// One extra check for the end of a track
	if (atLastEdit()) {
		// Call setReverse to set the position to the last frame of the last edit
		if (_reversed)
			setReverse(true);
		return true;
	}

	// Now we're in the edit and need to figure out what frame we need
	// Step forward (in the track time scale) until we reach or pass the target
	Audio::Timestamp time = requestedTime.convertToFramerate(_parent->timeScale);
	while (getRateAdjustedFrameTime() < (uint32)time.totalNumberOfFrames()) {
		_curFrame++;
		if (_durationOverride >= 0) {
			_nextFrameStartTime += _durationOverride;
			_durationOverride = -1;
		} else {
			_nextFrameStartTime += getFrameDuration();
		}
	}

	// Check if we went past, then adjust the frame times
	// The overshoot is remembered in _durationOverride so the next frame is
	// still displayed at its proper time
	if (getRateAdjustedFrameTime() != (uint32)time.totalNumberOfFrames()) {
		_curFrame--;
		_durationOverride = getRateAdjustedFrameTime() - time.totalNumberOfFrames();
		_nextFrameStartTime = time.totalNumberOfFrames();
	}

	if (_reversed) {
		// Call setReverse again to update
		setReverse(true);
	} else {
		// Handle the keyframe here
		// Decode from the nearest keyframe up to (but excluding) the target
		// frame so the codec's state is correct when it is displayed
		int32 destinationFrame = _curFrame + 1;

		assert(destinationFrame < (int32)_parent->frameCount);
		_curFrame = findKeyFrame(destinationFrame) - 1;
		while (_curFrame < destinationFrame - 1)
			bufferNextFrame();
	}

	return true;
}
00425 
Audio::Timestamp QuickTimeDecoder::VideoTrackHandler::getDuration() const {
	// The parsed track duration is expressed in the movie time scale
	return Audio::Timestamp(0, _parent->duration, _decoder->_timeScale);
}
00429 
uint16 QuickTimeDecoder::VideoTrackHandler::getWidth() const {
	// Width with the track's scale factor applied, truncated to an integer
	return getScaledWidth().toInt();
}
00433 
uint16 QuickTimeDecoder::VideoTrackHandler::getHeight() const {
	// Height with the track's scale factor applied, truncated to an integer
	return getScaledHeight().toInt();
}
00437 
Graphics::PixelFormat QuickTimeDecoder::VideoTrackHandler::getPixelFormat() const {
	// When dithering is forced, output is always 8bpp paletted
	if (_forcedDitherPalette)
		return Graphics::PixelFormat::createFormatCLUT8();

	// Otherwise the codec of the first sample description decides the format
	return ((VideoSampleDesc *)_parent->sampleDescs[0])->_videoCodec->getPixelFormat();
}
00444 
int QuickTimeDecoder::VideoTrackHandler::getFrameCount() const {
	// Total number of frames as counted by the parser
	return _parent->frameCount;
}
00448 
// Returns the wall-clock time (in milliseconds) the next frame is due, clamped
// to the current edit's boundary; 0 once the track has ended.
uint32 QuickTimeDecoder::VideoTrackHandler::getNextFrameStartTime() const {
	if (endOfTrack())
		return 0;

	Audio::Timestamp frameTime(0, getRateAdjustedFrameTime(), _parent->timeScale);

	// Check if the frame goes beyond the end of the edit. In that case, the next frame
	// should really be when we cross the edit boundary.
	if (_reversed) {
		Audio::Timestamp editStartTime(0, _parent->editList[_curEdit].timeOffset, _decoder->_timeScale);
		if (frameTime < editStartTime)
			return editStartTime.msecs();
	} else {
		Audio::Timestamp nextEditStartTime(0, _parent->editList[_curEdit].timeOffset + _parent->editList[_curEdit].trackDuration, _decoder->_timeScale);
		if (frameTime > nextEditStartTime)
			return nextEditStartTime.msecs();
	}

	// Not past an edit boundary, so the frame time is what should be used
	return frameTime.msecs();
}
00470 
// Decodes and returns the next frame of this track (or 0 at the end of the
// track). Handles reverse playback (by re-decoding from the nearest keyframe),
// edit-list transitions, scheduling of the following frame, forced dithering
// and per-track scaling.
const Graphics::Surface *QuickTimeDecoder::VideoTrackHandler::decodeNextFrame() {
	if (endOfTrack())
		return 0;

	if (_reversed) {
		// Subtract one to place us on the frame before the current displayed frame.
		_curFrame--;

		// We have one "dummy" frame at the end to so the last frame is displayed
		// for the right amount of time.
		if (_curFrame < 0)
			return 0;

		// Decode from the last key frame to the frame before the one we need.
		// TODO: Probably would be wise to do some caching
		int targetFrame = _curFrame;
		_curFrame = findKeyFrame(targetFrame) - 1;
		while (_curFrame != targetFrame - 1)
			bufferNextFrame();
	}

	// Update the edit list, if applicable
	// FIXME: Add support for playing backwards videos with more than one edit
	// For now, stay on the first edit for reversed playback
	if (endOfCurEdit() && !_reversed) {
		_curEdit++;

		if (atLastEdit())
			return 0;

		enterNewEditList(true);
	}

	const Graphics::Surface *frame = bufferNextFrame();

	// Advance the schedule: backwards when reversed, forwards otherwise.
	// A pending _durationOverride (left by a media seek) replaces the frame's
	// own duration and is consumed here.
	if (_reversed) {
		if (_durationOverride >= 0) {
			// Use our own duration overridden from a media seek
			_nextFrameStartTime -= _durationOverride;
			_durationOverride = -1;
		} else {
			// Just need to subtract the time
			_nextFrameStartTime -= getFrameDuration();
		}
	} else {
		if (_durationOverride >= 0) {
			// Use our own duration overridden from a media seek
			_nextFrameStartTime += _durationOverride;
			_durationOverride = -1;
		} else {
			_nextFrameStartTime += getFrameDuration();
		}
	}

	// Handle forced dithering
	if (frame && _forcedDitherPalette)
		frame = forceDither(*frame);

	// Route the frame through the intermediate surface if the track is scaled
	if (frame && (_parent->scaleFactorX != 1 || _parent->scaleFactorY != 1)) {
		if (!_scaledSurface) {
			_scaledSurface = new Graphics::Surface();
			_scaledSurface->create(getScaledWidth().toInt(), getScaledHeight().toInt(), getPixelFormat());
		}

		_decoder->scaleSurface(frame, _scaledSurface, _parent->scaleFactorX, _parent->scaleFactorY);
		return _scaledSurface;
	}

	return frame;
}
00541 
00542 const byte *QuickTimeDecoder::VideoTrackHandler::getPalette() const {
00543     _dirtyPalette = false;
00544     return _forcedDitherPalette ? _forcedDitherPalette : _curPalette;
00545 }
00546 
// Switches playback direction. Since decodeNextFrame() pre-advances in the
// chosen direction, this repositions _curFrame to sit one frame "past" the
// currently displayed one on the appropriate side. Returns false if reverse
// playback is requested on a track with more than one edit (unsupported).
bool QuickTimeDecoder::VideoTrackHandler::setReverse(bool reverse) {
	_reversed = reverse;

	if (_reversed) {
		if (_parent->editList.size() != 1) {
			// TODO: Myst's holo.mov needs this :(
			warning("Can only set reverse without edits");
			return false;
		}

		if (atLastEdit()) {
			// If we're at the end of the video, go to the penultimate edit.
			// The current frame is set to one beyond the last frame here;
			// one "past" the currently displayed frame.
			_curEdit = _parent->editList.size() - 1;
			_curFrame = _parent->frameCount;
			_nextFrameStartTime = _parent->editList[_curEdit].trackDuration + _parent->editList[_curEdit].timeOffset;
		} else if (_durationOverride >= 0) {
			// We just had a media seek, so "pivot" around the frame that should
			// be displayed.
			_curFrame += 2;
			_nextFrameStartTime += _durationOverride;
		} else {
			// We need to put _curFrame to be the one after the one that should be displayed.
			// Since we're on the frame that should be displaying right now, add one.
			_curFrame++;
		}
	} else {
		// Update the edit list, if applicable
		if (!atLastEdit() && endOfCurEdit()) {
			_curEdit++;

			if (atLastEdit())
				return true;
		}

		if (_durationOverride >= 0) {
			// We just had a media seek, so "pivot" around the frame that should
			// be displayed.
			_curFrame--;
			_nextFrameStartTime -= _durationOverride;
		}

		// We need to put _curFrame to be the one before the one that should be displayed.
		// Since we're on the frame that should be displaying right now, subtract one.
		// (As long as the current frame isn't -1, of course)
		if (_curFrame > 0) {
			// We then need to handle the keyframe situation
			// Re-decode from the nearest keyframe so codec state is valid
			int targetFrame = _curFrame - 1;
			_curFrame = findKeyFrame(targetFrame) - 1;
			while (_curFrame < targetFrame)
				bufferNextFrame();
		} else if (_curFrame == 0) {
			// Make us start at the first frame (no keyframe needed)
			_curFrame--;
		}
	}

	return true;
}
00607 
Common::Rational QuickTimeDecoder::VideoTrackHandler::getScaledWidth() const {
	// Stored width divided by the horizontal scale factor, kept as a rational
	// so callers can decide how to round
	return Common::Rational(_parent->width) / _parent->scaleFactorX;
}
00611 
Common::Rational QuickTimeDecoder::VideoTrackHandler::getScaledHeight() const {
	// Stored height divided by the vertical scale factor, kept as a rational
	// so callers can decide how to round
	return Common::Rational(_parent->height) / _parent->scaleFactorY;
}
00615 
// Reads the raw packet for the current frame (_curFrame) from the file and
// returns it as a new stream owned by the caller. descId receives the 1-based
// sample description index the packet was encoded with. Calls error() if the
// frame cannot be located in the sample tables.
Common::SeekableReadStream *QuickTimeDecoder::VideoTrackHandler::getNextFramePacket(uint32 &descId) {
	// First, we have to track down which chunk holds the sample and which sample in the chunk contains the frame we are looking for.
	int32 totalSampleCount = 0;
	int32 sampleInChunk = 0;
	int32 actualChunk = -1;
	uint32 sampleToChunkIndex = 0;

	for (uint32 i = 0; i < _parent->chunkCount; i++) {
		// Advance to the next sample-to-chunk run once this chunk reaches it
		if (sampleToChunkIndex < _parent->sampleToChunkCount && i >= _parent->sampleToChunk[sampleToChunkIndex].first)
			sampleToChunkIndex++;

		totalSampleCount += _parent->sampleToChunk[sampleToChunkIndex - 1].count;

		if (totalSampleCount > _curFrame) {
			actualChunk = i;
			descId = _parent->sampleToChunk[sampleToChunkIndex - 1].id;
			sampleInChunk = _parent->sampleToChunk[sampleToChunkIndex - 1].count - totalSampleCount + _curFrame;
			break;
		}
	}

	if (actualChunk < 0)
		error("Could not find data for frame %d", _curFrame);

	// Next seek to that frame
	Common::SeekableReadStream *stream = _decoder->_fd;
	stream->seek(_parent->chunkOffsets[actualChunk]);

	// Then, if the chunk holds more than one frame, seek to where the frame we want is located
	for (int32 i = _curFrame - sampleInChunk; i < _curFrame; i++) {
		// sampleSize != 0 means all samples share that fixed size
		if (_parent->sampleSize != 0)
			stream->skip(_parent->sampleSize);
		else
			stream->skip(_parent->sampleSizes[i]);
	}

	// Finally, read in the raw data for the frame
	//debug("Frame Data[%d]: Offset = %d, Size = %d", _curFrame, stream->pos(), _parent->sampleSizes[_curFrame]);

	if (_parent->sampleSize != 0)
		return stream->readStream(_parent->sampleSize);

	return stream->readStream(_parent->sampleSizes[_curFrame]);
}
00660 
00661 uint32 QuickTimeDecoder::VideoTrackHandler::getFrameDuration() {
00662     uint32 curFrameIndex = 0;
00663     for (int32 i = 0; i < _parent->timeToSampleCount; i++) {
00664         curFrameIndex += _parent->timeToSample[i].count;
00665         if ((uint32)_curFrame < curFrameIndex) {
00666             // Ok, now we have what duration this frame has.
00667             return _parent->timeToSample[i].duration;
00668         }
00669     }
00670 
00671     // This should never occur
00672     error("Cannot find duration for frame %d", _curFrame);
00673     return 0;
00674 }
00675 
00676 uint32 QuickTimeDecoder::VideoTrackHandler::findKeyFrame(uint32 frame) const {
00677     for (int i = _parent->keyframeCount - 1; i >= 0; i--)
00678         if (_parent->keyframes[i] <= frame)
00679             return _parent->keyframes[i];
00680 
00681     // If none found, we'll assume the requested frame is a key frame
00682     return frame;
00683 }
00684 
// Positions the track at the start of the current edit (skipping empty ones):
// maps the edit's media time to a frame number via the time-to-sample table,
// optionally decodes up to that frame, and resets the frame schedule.
void QuickTimeDecoder::VideoTrackHandler::enterNewEditList(bool bufferFrames) {
	// Bypass all empty edit lists first
	while (!atLastEdit() && _parent->editList[_curEdit].mediaTime == -1)
		_curEdit++;

	if (atLastEdit())
		return;

	uint32 mediaTime = _parent->editList[_curEdit].mediaTime;
	uint32 frameNum = 0;
	uint32 totalDuration = 0;
	_durationOverride = -1;

	// Track down where the mediaTime is in the media
	// This is basically time -> frame mapping
	// Note that this code uses first frame = 0
	for (int32 i = 0; i < _parent->timeToSampleCount; i++) {
		uint32 duration = _parent->timeToSample[i].count * _parent->timeToSample[i].duration;

		if (totalDuration + duration >= mediaTime) {
			// The edit starts within this run; advance whole frames as far
			// as possible without passing mediaTime
			uint32 frameInc = (mediaTime - totalDuration) / _parent->timeToSample[i].duration;
			frameNum += frameInc;
			totalDuration += frameInc * _parent->timeToSample[i].duration;

			// If we didn't get to the exact media time, mark an override for
			// the time.
			if (totalDuration != mediaTime)
				_durationOverride = totalDuration + _parent->timeToSample[i].duration - mediaTime;

			break;
		}

		frameNum += _parent->timeToSample[i].count;
		totalDuration += duration;
	}

	if (bufferFrames) {
		// Track down the keyframe
		// Then decode until the frame before target
		_curFrame = findKeyFrame(frameNum) - 1;
		while (_curFrame < (int32)frameNum - 1)
			bufferNextFrame();
	} else {
		// Since frameNum is the frame that needs to be displayed
		// we'll set _curFrame to be the "last frame displayed"
		_curFrame = frameNum - 1;
	}

	_nextFrameStartTime = getCurEditTimeOffset();
}
00735 
// Advances _curFrame by one, reads its packet and runs it through the codec.
// Also tracks palette changes from either the codec or the sample description.
// Returns 0 when the packet is missing, the description index is invalid, or
// the codec is unsupported.
const Graphics::Surface *QuickTimeDecoder::VideoTrackHandler::bufferNextFrame() {
	_curFrame++;

	// Get the next packet
	uint32 descId;
	Common::SeekableReadStream *frameData = getNextFramePacket(descId);

	// descId is 1-based; 0 or out-of-range means a malformed sample table
	if (!frameData || !descId || descId > _parent->sampleDescs.size()) {
		delete frameData;
		return 0;
	}

	// Find which video description entry we want
	VideoSampleDesc *entry = (VideoSampleDesc *)_parent->sampleDescs[descId - 1];

	// No codec was created for this description (unsupported format)
	if (!entry->_videoCodec) {
		delete frameData;
		return 0;
	}

	const Graphics::Surface *frame = entry->_videoCodec->decodeFrame(*frameData);
	delete frameData;

	// Update the palette
	if (entry->_videoCodec->containsPalette()) {
		// The codec itself contains a palette
		if (entry->_videoCodec->hasDirtyPalette()) {
			_curPalette = entry->_videoCodec->getPalette();
			_dirtyPalette = true;
		}
	} else {
		// Check if the video description has been updated
		byte *palette = entry->_palette;

		if (palette != _curPalette) {
			_curPalette = palette;
			_dirtyPalette = true;
		}
	}

	return frame;
}
00778 
00779 uint32 QuickTimeDecoder::VideoTrackHandler::getRateAdjustedFrameTime() const {
00780     // Figure out what time the next frame is at taking the edit list rate into account
00781     Common::Rational offsetFromEdit = Common::Rational(_nextFrameStartTime - getCurEditTimeOffset()) / _parent->editList[_curEdit].mediaRate;
00782     uint32 convertedTime = offsetFromEdit.toInt();
00783 
00784     if ((offsetFromEdit.getNumerator() % offsetFromEdit.getDenominator()) > (offsetFromEdit.getDenominator() / 2))
00785         convertedTime++;
00786 
00787     return convertedTime + getCurEditTimeOffset();
00788 }
00789 
00790 uint32 QuickTimeDecoder::VideoTrackHandler::getCurEditTimeOffset() const {
00791     // Need to convert to the track scale
00792 
00793     // We have to round the time off to the nearest in the scale, otherwise
00794     // bad things happen. QuickTime docs are pretty silent on all this stuff,
00795     // so this was found from samples. It doesn't help that this is really
00796     // the only open source implementation of QuickTime edits.
00797 
00798     uint32 mult = _parent->editList[_curEdit].timeOffset * _parent->timeScale;
00799     uint32 result = mult / _decoder->_timeScale;
00800 
00801     if ((mult % _decoder->_timeScale) > (_decoder->_timeScale / 2))
00802         result++;
00803 
00804     return result;
00805 }
00806 
00807 uint32 QuickTimeDecoder::VideoTrackHandler::getCurEditTrackDuration() const {
00808     // Need to convert to the track scale
00809     return _parent->editList[_curEdit].trackDuration * _parent->timeScale / _decoder->_timeScale;
00810 }
00811 
00812 bool QuickTimeDecoder::VideoTrackHandler::atLastEdit() const {
00813     return _curEdit == _parent->editList.size();
00814 }
00815 
00816 bool QuickTimeDecoder::VideoTrackHandler::endOfCurEdit() const {
00817     // We're at the end of the edit once the next frame's time would
00818     // bring us past the end of the edit.
00819     return getRateAdjustedFrameTime() >= getCurEditTimeOffset() + getCurEditTrackDuration();
00820 }
00821 
00822 bool QuickTimeDecoder::VideoTrackHandler::canDither() const {
00823     for (uint i = 0; i < _parent->sampleDescs.size(); i++) {
00824         VideoSampleDesc *desc = (VideoSampleDesc *)_parent->sampleDescs[i];
00825 
00826         if (!desc || !desc->_videoCodec)
00827             return false;
00828     }
00829 
00830     return true;
00831 }
00832 
00833 void QuickTimeDecoder::VideoTrackHandler::setDither(const byte *palette) {
00834     assert(canDither());
00835 
00836     for (uint i = 0; i < _parent->sampleDescs.size(); i++) {
00837         VideoSampleDesc *desc = (VideoSampleDesc *)_parent->sampleDescs[i];
00838 
00839         if (desc->_videoCodec->canDither(Image::Codec::kDitherTypeQT)) {
00840             // Codec dither
00841             desc->_videoCodec->setDither(Image::Codec::kDitherTypeQT, palette);
00842         } else {
00843             // Forced dither
00844             _forcedDitherPalette = new byte[256 * 3];
00845             memcpy(_forcedDitherPalette, palette, 256 * 3);
00846             _ditherTable = Image::Codec::createQuickTimeDitherTable(_forcedDitherPalette, 256);
00847             _dirtyPalette = true;
00848         }
00849     }
00850 }
00851 
00852 namespace {
00853 
00854 // Return a pixel in RGB554
00855 uint16 makeDitherColor(byte r, byte g, byte b) {
00856     return ((r & 0xF8) << 6) | ((g & 0xF8) << 1) | (b >> 4);
00857 }
00858 
00859 // Default template to convert a dither color
00860 template<typename PixelInt>
00861 inline uint16 readDitherColor(PixelInt srcColor, const Graphics::PixelFormat& format, const byte *palette) {
00862     byte r, g, b;
00863     format.colorToRGB(srcColor, r, g, b);
00864     return makeDitherColor(r, g, b);
00865 }
00866 
00867 // Specialized version for 8bpp
00868 template<>
00869 inline uint16 readDitherColor(byte srcColor, const Graphics::PixelFormat& format, const byte *palette) {
00870     return makeDitherColor(palette[srcColor * 3], palette[srcColor * 3 + 1], palette[srcColor * 3 + 2]);
00871 }
00872 
00873 template<typename PixelInt>
00874 void ditherFrame(const Graphics::Surface &src, Graphics::Surface &dst, const byte *ditherTable, const byte *palette = 0) {
00875     static const uint16 colorTableOffsets[] = { 0x0000, 0xC000, 0x4000, 0x8000 };
00876 
00877     for (int y = 0; y < dst.h; y++) {
00878         const PixelInt *srcPtr = (const PixelInt *)src.getBasePtr(0, y);
00879         byte *dstPtr = (byte *)dst.getBasePtr(0, y);
00880         uint16 colorTableOffset = colorTableOffsets[y & 3];
00881 
00882         for (int x = 0; x < dst.w; x++) {
00883             uint16 color = readDitherColor(*srcPtr++, src.format, palette);
00884             *dstPtr++ = ditherTable[colorTableOffset + color];
00885             colorTableOffset += 0x4000;
00886         }
00887     }
00888 }
00889 
00890 } // End of anonymous namespace
00891 
00892 const Graphics::Surface *QuickTimeDecoder::VideoTrackHandler::forceDither(const Graphics::Surface &frame) {
00893     if (frame.format.bytesPerPixel == 1) {
00894         // This should always be true, but this is for sanity
00895         if (!_curPalette)
00896             return &frame;
00897 
00898         // If the palettes match, bail out
00899         if (memcmp(_forcedDitherPalette, _curPalette, 256 * 3) == 0)
00900             return &frame;
00901     }
00902 
00903     // Need to create a new one
00904     if (!_ditherFrame) {
00905         _ditherFrame = new Graphics::Surface();
00906         _ditherFrame->create(frame.w, frame.h, Graphics::PixelFormat::createFormatCLUT8());
00907     }
00908 
00909     if (frame.format.bytesPerPixel == 1)
00910         ditherFrame<byte>(frame, *_ditherFrame, _ditherTable, _curPalette);
00911     else if (frame.format.bytesPerPixel == 2)
00912         ditherFrame<uint16>(frame, *_ditherFrame, _ditherTable);
00913     else if (frame.format.bytesPerPixel == 4)
00914         ditherFrame<uint32>(frame, *_ditherFrame, _ditherTable);
00915 
00916     return _ditherFrame;
00917 }
00918 
00919 } // End of namespace Video


Generated on Sat Jul 13 2019 05:01:21 for ResidualVM by doxygen 1.7.1
curved edge   curved edge