qt_decoder.cpp — QuickTime video decoder source listing (from the ResidualVM Doxygen documentation).
00001 /* ScummVM - Graphic Adventure Engine
00002  *
00003  * ScummVM is the legal property of its developers, whose names
00004  * are too numerous to list here. Please refer to the COPYRIGHT
00005  * file distributed with this source distribution.
00006  *
00007  * This program is free software; you can redistribute it and/or
00008  * modify it under the terms of the GNU General Public License
00009  * as published by the Free Software Foundation; either version 2
00010  * of the License, or (at your option) any later version.
00011  *
00012  * This program is distributed in the hope that it will be useful,
00013  * but WITHOUT ANY WARRANTY; without even the implied warranty of
00014  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00015  * GNU General Public License for more details.
00016  *
00017  * You should have received a copy of the GNU General Public License
00018  * along with this program; if not, write to the Free Software
00019  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
00020  *
00021  */
00022 
00023 //
00024 // Partially based on ffmpeg code.
00025 //
00026 // Copyright (c) 2001 Fabrice Bellard.
00027 // First version by Francois Revol revol@free.fr
00028 // Seek function by Gael Chardon gael.dev@4now.net
00029 //
00030 
00031 #include "video/qt_decoder.h"
00032 
00033 #include "audio/audiostream.h"
00034 
00035 #include "common/debug.h"
00036 #include "common/memstream.h"
00037 #include "common/system.h"
00038 #include "common/textconsole.h"
00039 #include "common/util.h"
00040 
00041 // Video codecs
00042 #include "image/codecs/codec.h"
00043 
00044 namespace Video {
00045 
00047 // QuickTimeDecoder
00049 
00050 QuickTimeDecoder::QuickTimeDecoder() {
00051     _scaledSurface = 0;
00052     _width = _height = 0;
00053 }
00054 
// Tear down all decoder and parser state (see close()).
QuickTimeDecoder::~QuickTimeDecoder() {
    close();
}
00058 
00059 bool QuickTimeDecoder::loadFile(const Common::String &filename) {
00060     if (!Common::QuickTimeParser::parseFile(filename))
00061         return false;
00062 
00063     init();
00064     return true;
00065 }
00066 
00067 bool QuickTimeDecoder::loadStream(Common::SeekableReadStream *stream) {
00068     if (!Common::QuickTimeParser::parseStream(stream))
00069         return false;
00070 
00071     init();
00072     return true;
00073 }
00074 
00075 void QuickTimeDecoder::close() {
00076     VideoDecoder::close();
00077     Common::QuickTimeParser::close();
00078 
00079     if (_scaledSurface) {
00080         _scaledSurface->free();
00081         delete _scaledSurface;
00082         _scaledSurface = 0;
00083     }
00084 }
00085 
00086 const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() {
00087     const Graphics::Surface *frame = VideoDecoder::decodeNextFrame();
00088 
00089     // Update audio buffers too
00090     // (needs to be done after we find the next track)
00091     updateAudioBuffer();
00092 
00093     // We have to initialize the scaled surface
00094     if (frame && (_scaleFactorX != 1 || _scaleFactorY != 1)) {
00095         if (!_scaledSurface) {
00096             _scaledSurface = new Graphics::Surface();
00097             _scaledSurface->create(_width, _height, getPixelFormat());
00098         }
00099 
00100         scaleSurface(frame, _scaledSurface, _scaleFactorX, _scaleFactorY);
00101         return _scaledSurface;
00102     }
00103 
00104     return frame;
00105 }
00106 
00107 Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(Common::QuickTimeParser::Track *track, uint32 format, uint32 descSize) {
00108     if (track->codecType == CODEC_TYPE_VIDEO) {
00109         debug(0, "Video Codec FourCC: \'%s\'", tag2str(format));
00110 
00111         VideoSampleDesc *entry = new VideoSampleDesc(track, format);
00112 
00113         _fd->readUint16BE(); // version
00114         _fd->readUint16BE(); // revision level
00115         _fd->readUint32BE(); // vendor
00116         _fd->readUint32BE(); // temporal quality
00117         _fd->readUint32BE(); // spacial quality
00118 
00119         uint16 width = _fd->readUint16BE(); // width
00120         uint16 height = _fd->readUint16BE(); // height
00121 
00122         // The width is most likely invalid for entries after the first one
00123         // so only set the overall width if it is not zero here.
00124         if (width)
00125             track->width = width;
00126 
00127         if (height)
00128             track->height = height;
00129 
00130         _fd->readUint32BE(); // horiz resolution
00131         _fd->readUint32BE(); // vert resolution
00132         _fd->readUint32BE(); // data size, always 0
00133         _fd->readUint16BE(); // frames per samples
00134 
00135         byte codecName[32];
00136         _fd->read(codecName, 32); // codec name, pascal string (FIXME: true for mp4?)
00137         if (codecName[0] <= 31) {
00138             memcpy(entry->_codecName, &codecName[1], codecName[0]);
00139             entry->_codecName[codecName[0]] = 0;
00140         }
00141 
00142         entry->_bitsPerSample = _fd->readUint16BE(); // depth
00143         entry->_colorTableId = _fd->readUint16BE(); // colortable id
00144 
00145         // figure out the palette situation
00146         byte colorDepth = entry->_bitsPerSample & 0x1F;
00147         bool colorGreyscale = (entry->_bitsPerSample & 0x20) != 0;
00148 
00149         debug(0, "color depth: %d", colorDepth);
00150 
00151         // if the depth is 2, 4, or 8 bpp, file is palettized
00152         if (colorDepth == 2 || colorDepth == 4 || colorDepth == 8) {
00153             // Initialize the palette
00154             entry->_palette = new byte[256 * 3];
00155             memset(entry->_palette, 0, 256 * 3);
00156 
00157             if (colorGreyscale) {
00158                 debug(0, "Greyscale palette");
00159 
00160                 // compute the greyscale palette
00161                 uint16 colorCount = 1 << colorDepth;
00162                 int16 colorIndex = 255;
00163                 byte colorDec = 256 / (colorCount - 1);
00164                 for (byte j = 0; j < colorCount; j++) {
00165                     entry->_palette[j * 3] = entry->_palette[j * 3 + 1] = entry->_palette[j * 3 + 2] = colorIndex;
00166                     colorIndex -= colorDec;
00167                     if (colorIndex < 0)
00168                         colorIndex = 0;
00169                 }
00170             } else if (entry->_colorTableId & 0x08) {
00171                 // if flag bit 3 is set, use the default palette
00172                 //uint16 colorCount = 1 << colorDepth;
00173 
00174                 warning("Predefined palette! %dbpp", colorDepth);
00175             } else {
00176                 debug(0, "Palette from file");
00177 
00178                 // load the palette from the file
00179                 uint32 colorStart = _fd->readUint32BE();
00180                 /* uint16 colorCount = */ _fd->readUint16BE();
00181                 uint16 colorEnd = _fd->readUint16BE();
00182                 for (uint32 j = colorStart; j <= colorEnd; j++) {
00183                     // each R, G, or B component is 16 bits;
00184                     // only use the top 8 bits; skip alpha bytes
00185                     // up front
00186                     _fd->readByte();
00187                     _fd->readByte();
00188                     entry->_palette[j * 3] = _fd->readByte();
00189                     _fd->readByte();
00190                     entry->_palette[j * 3 + 1] = _fd->readByte();
00191                     _fd->readByte();
00192                     entry->_palette[j * 3 + 2] = _fd->readByte();
00193                     _fd->readByte();
00194                 }
00195             }
00196         }
00197 
00198         return entry;
00199     }
00200 
00201     // Pass it on up
00202     return Audio::QuickTimeAudioDecoder::readSampleDesc(track, format, descSize);
00203 }
00204 
00205 void QuickTimeDecoder::init() {
00206     Audio::QuickTimeAudioDecoder::init();
00207 
00208     // Initialize all the audio tracks
00209     for (uint32 i = 0; i < _audioTracks.size(); i++)
00210         addTrack(new AudioTrackHandler(this, _audioTracks[i]));
00211 
00212     // Initialize all the video tracks
00213     const Common::Array<Common::QuickTimeParser::Track *> &tracks = Common::QuickTimeParser::_tracks;
00214     for (uint32 i = 0; i < tracks.size(); i++) {
00215         if (tracks[i]->codecType == CODEC_TYPE_VIDEO) {
00216             for (uint32 j = 0; j < tracks[i]->sampleDescs.size(); j++)
00217                 ((VideoSampleDesc *)tracks[i]->sampleDescs[j])->initCodec();
00218 
00219             addTrack(new VideoTrackHandler(this, tracks[i]));
00220         }
00221     }
00222 
00223     // Prepare the first video track
00224     VideoTrackHandler *nextVideoTrack = (VideoTrackHandler *)findNextVideoTrack();
00225 
00226     if (nextVideoTrack) {
00227         if (_scaleFactorX != 1 || _scaleFactorY != 1) {
00228             // We have to take the scale into consideration when setting width/height
00229             _width = (nextVideoTrack->getScaledWidth() / _scaleFactorX).toInt();
00230             _height = (nextVideoTrack->getScaledHeight() / _scaleFactorY).toInt();
00231         } else {
00232             _width = nextVideoTrack->getWidth();
00233             _height = nextVideoTrack->getHeight();
00234         }
00235     }
00236 }
00237 
00238 void QuickTimeDecoder::updateAudioBuffer() {
00239     // Updates the audio buffers for all audio tracks
00240     for (TrackListIterator it = getTrackListBegin(); it != getTrackListEnd(); it++)
00241         if ((*it)->getTrackType() == VideoDecoder::Track::kTrackTypeAudio)
00242             ((AudioTrackHandler *)*it)->updateBuffer();
00243 }
00244 
00245 void QuickTimeDecoder::scaleSurface(const Graphics::Surface *src, Graphics::Surface *dst, const Common::Rational &scaleFactorX, const Common::Rational &scaleFactorY) {
00246     assert(src && dst);
00247 
00248     for (int32 j = 0; j < dst->h; j++)
00249         for (int32 k = 0; k < dst->w; k++)
00250             memcpy(dst->getBasePtr(k, j), src->getBasePtr((k * scaleFactorX).toInt() , (j * scaleFactorY).toInt()), src->format.bytesPerPixel);
00251 }
00252 
00253 QuickTimeDecoder::VideoSampleDesc::VideoSampleDesc(Common::QuickTimeParser::Track *parentTrack, uint32 codecTag) : Common::QuickTimeParser::SampleDesc(parentTrack, codecTag) {
00254     memset(_codecName, 0, 32);
00255     _colorTableId = 0;
00256     _palette = 0;
00257     _videoCodec = 0;
00258     _bitsPerSample = 0;
00259 }
00260 
// Free the palette buffer and the codec owned by this description.
QuickTimeDecoder::VideoSampleDesc::~VideoSampleDesc() {
    delete[] _palette;
    delete _videoCodec;
}
00265 
// Instantiate the image codec matching this description's FourCC.
// _videoCodec may stay null for unsupported codecs; callers such as
// bufferNextFrame() guard against that.
void QuickTimeDecoder::VideoSampleDesc::initCodec() {
    _videoCodec = Image::createQuickTimeCodec(_codecTag, _parentTrack->width, _parentTrack->height, _bitsPerSample & 0x1f);
}
00269 
// Wrap a parsed QuickTime audio track so the VideoDecoder track
// machinery can drive it. The handler does not own the audio track.
QuickTimeDecoder::AudioTrackHandler::AudioTrackHandler(QuickTimeDecoder *decoder, QuickTimeAudioTrack *audioTrack) :
        SeekableAudioTrack(decoder->getSoundType()),
        _decoder(decoder),
        _audioTrack(audioTrack) {
}
00275 
00276 void QuickTimeDecoder::AudioTrackHandler::updateBuffer() {
00277     if (_decoder->endOfVideoTracks()) // If we have no video left (or no video), there's nothing to base our buffer against
00278         _audioTrack->queueRemainingAudio();
00279     else // Otherwise, queue enough to get us to the next frame plus another half second spare
00280         _audioTrack->queueAudio(Audio::Timestamp(_decoder->getTimeToNextFrame() + 500, 1000));
00281 }
00282 
// The wrapped audio track itself doubles as the seekable stream.
Audio::SeekableAudioStream *QuickTimeDecoder::AudioTrackHandler::getSeekableAudioStream() const {
    return _audioTrack;
}
00286 
// Set up playback state for one video track: clip out-of-bounds edits,
// position on the first non-empty edit, and start with no frame decoded.
QuickTimeDecoder::VideoTrackHandler::VideoTrackHandler(QuickTimeDecoder *decoder, Common::QuickTimeParser::Track *parent) : _decoder(decoder), _parent(parent) {
    checkEditListBounds();

    _curEdit = 0;
    enterNewEditList(false);

    // NOTE(review): enterNewEditList() already computed _curFrame and
    // _durationOverride for the first edit; resetting them here assumes
    // the first edit starts at media time 0 — confirm.
    _curFrame = -1;
    _durationOverride = -1;
    _scaledSurface = 0;
    _curPalette = 0;
    _dirtyPalette = false;
    _reversed = false;
    _forcedDitherPalette = 0;
    _ditherTable = 0;
    _ditherFrame = 0;
}
00303 
// Sanitize the track's edit list against the actual media duration.
void QuickTimeDecoder::VideoTrackHandler::checkEditListBounds() {
    // Check all the edit list entries are within the bounds of the media
    // In the Spanish version of Riven, the last edit of the video ogk.mov
    // ends one frame after the end of the media.

    uint32 offset = 0;
    // Media duration converted from the media time scale into the movie time scale.
    uint32 mediaDuration = _parent->mediaDuration * _decoder->_timeScale / _parent->timeScale;

    for (uint i = 0; i < _parent->editList.size(); i++) {
        EditListEntry &edit = _parent->editList[i];

        if (edit.mediaTime < 0) {
            continue; // Ignore empty edits
        }

        if ((uint32) edit.mediaTime > mediaDuration) {
            // Check if the edit starts after the end of the media
            // If so, mark it as empty so it is ignored
            edit.mediaTime = -1;
        } else if (edit.mediaTime + edit.trackDuration > mediaDuration) {
            // Check if the edit ends after the end of the media
            // If so, clip it so it fits in the media
            edit.trackDuration = mediaDuration - edit.mediaTime;
        }

        // Recompute each edit's start offset from the (possibly clipped)
        // durations of the edits before it.
        edit.timeOffset = offset;
        offset += edit.trackDuration;
    }
}
00333 
00334 QuickTimeDecoder::VideoTrackHandler::~VideoTrackHandler() {
00335     if (_scaledSurface) {
00336         _scaledSurface->free();
00337         delete _scaledSurface;
00338     }
00339 
00340     delete[] _forcedDitherPalette;
00341     delete[] _ditherTable;
00342 
00343     if (_ditherFrame) {
00344         _ditherFrame->free();
00345         delete _ditherFrame;
00346     }
00347 }
00348 
00349 bool QuickTimeDecoder::VideoTrackHandler::endOfTrack() const {
00350     // A track is over when we've finished going through all edits
00351     return _reversed ? (_curEdit == 0 && _curFrame < 0) : atLastEdit();
00352 }
00353 
// Seek this track to the requested time. The time is first converted to
// the movie time scale to locate the containing edit, then to the media
// time scale to locate the exact frame within that edit.
bool QuickTimeDecoder::VideoTrackHandler::seek(const Audio::Timestamp &requestedTime) {
    // Find the edit whose [timeOffset, timeOffset + trackDuration) span
    // contains the requested time.
    uint32 convertedFrames = requestedTime.convertToFramerate(_decoder->_timeScale).totalNumberOfFrames();
    for (_curEdit = 0; !atLastEdit(); _curEdit++)
        if (convertedFrames >= _parent->editList[_curEdit].timeOffset && convertedFrames < _parent->editList[_curEdit].timeOffset + _parent->editList[_curEdit].trackDuration)
            break;

    // If we did reach the end of the track, break out
    if (atLastEdit()) {
        // Call setReverse to set the position to the last frame of the last edit
        if (_reversed)
            setReverse(true);
        return true;
    }

    // If this track is in an empty edit, position us at the next non-empty
    // edit. There's nothing else to do after this.
    if (_parent->editList[_curEdit].mediaTime == -1) {
        while (!atLastEdit() && _parent->editList[_curEdit].mediaTime == -1)
            _curEdit++;

        if (!atLastEdit())
            enterNewEditList(true);

        return true;
    }

    enterNewEditList(false);

    // One extra check for the end of a track
    if (atLastEdit()) {
        // Call setReverse to set the position to the last frame of the last edit
        if (_reversed)
            setReverse(true);
        return true;
    }

    // Now we're in the edit and need to figure out what frame we need
    // (working in the media/track time scale from here on).
    Audio::Timestamp time = requestedTime.convertToFramerate(_parent->timeScale);
    while (getRateAdjustedFrameTime() < (uint32)time.totalNumberOfFrames()) {
        _curFrame++;
        if (_durationOverride >= 0) {
            // A leftover partial-frame duration from a previous seek is
            // consumed before regular frame durations apply again.
            _nextFrameStartTime += _durationOverride;
            _durationOverride = -1;
        } else {
            _nextFrameStartTime += getFrameDuration();
        }
    }

    // Check if we went past, then adjust the frame times
    // (step back one frame and remember the remaining partial duration
    // so the next advance lands exactly on the frame boundary).
    if (getRateAdjustedFrameTime() != (uint32)time.totalNumberOfFrames()) {
        _curFrame--;
        _durationOverride = getRateAdjustedFrameTime() - time.totalNumberOfFrames();
        _nextFrameStartTime = time.totalNumberOfFrames();
    }

    if (_reversed) {
        // Call setReverse again to update
        setReverse(true);
    } else {
        // Handle the keyframe here: decode forward from the nearest
        // keyframe so the target frame has all its references in place.
        int32 destinationFrame = _curFrame + 1;

        assert(destinationFrame < (int32)_parent->frameCount);
        _curFrame = findKeyFrame(destinationFrame) - 1;
        while (_curFrame < destinationFrame - 1)
            bufferNextFrame();
    }

    return true;
}
00424 
// Total duration of this track, expressed in the movie time scale.
Audio::Timestamp QuickTimeDecoder::VideoTrackHandler::getDuration() const {
    return Audio::Timestamp(0, _parent->duration, _decoder->_timeScale);
}
00428 
// Display width in whole pixels (track scale factor applied, truncated).
uint16 QuickTimeDecoder::VideoTrackHandler::getWidth() const {
    return getScaledWidth().toInt();
}
00432 
// Display height in whole pixels (track scale factor applied, truncated).
uint16 QuickTimeDecoder::VideoTrackHandler::getHeight() const {
    return getScaledHeight().toInt();
}
00436 
// When dithering is forced the output is 8bpp paletted; otherwise use
// whatever format the first sample description's codec decodes to.
Graphics::PixelFormat QuickTimeDecoder::VideoTrackHandler::getPixelFormat() const {
    if (_forcedDitherPalette)
        return Graphics::PixelFormat::createFormatCLUT8();

    return ((VideoSampleDesc *)_parent->sampleDescs[0])->_videoCodec->getPixelFormat();
}
00443 
// Number of frames (samples) in the track's media.
int QuickTimeDecoder::VideoTrackHandler::getFrameCount() const {
    return _parent->frameCount;
}
00447 
// Time (in milliseconds) at which the next frame should be displayed,
// clamped to the current edit's boundary.
uint32 QuickTimeDecoder::VideoTrackHandler::getNextFrameStartTime() const {
    if (endOfTrack())
        return 0;

    Audio::Timestamp frameTime(0, getRateAdjustedFrameTime(), _parent->timeScale);

    // Check if the frame goes beyond the end of the edit. In that case, the next frame
    // should really be when we cross the edit boundary.
    if (_reversed) {
        // Playing backwards: clamp to the start of the current edit.
        Audio::Timestamp editStartTime(0, _parent->editList[_curEdit].timeOffset, _decoder->_timeScale);
        if (frameTime < editStartTime)
            return editStartTime.msecs();
    } else {
        // Playing forwards: clamp to the start of the next edit.
        Audio::Timestamp nextEditStartTime(0, _parent->editList[_curEdit].timeOffset + _parent->editList[_curEdit].trackDuration, _decoder->_timeScale);
        if (frameTime > nextEditStartTime)
            return nextEditStartTime.msecs();
    }

    // Not past an edit boundary, so the frame time is what should be used
    return frameTime.msecs();
}
00469 
// Decode and return the next frame of this track (or 0 at the end),
// honoring reversed playback, edit transitions, forced dithering and
// per-track scaling. The returned surface is owned by the handler/codec.
const Graphics::Surface *QuickTimeDecoder::VideoTrackHandler::decodeNextFrame() {
    if (endOfTrack())
        return 0;

    if (_reversed) {
        // Subtract one to place us on the frame before the current displayed frame.
        _curFrame--;

        // We have one "dummy" frame at the end to so the last frame is displayed
        // for the right amount of time.
        if (_curFrame < 0)
            return 0;

        // Decode from the last key frame to the frame before the one we need.
        // TODO: Probably would be wise to do some caching
        int targetFrame = _curFrame;
        _curFrame = findKeyFrame(targetFrame) - 1;
        while (_curFrame != targetFrame - 1)
            bufferNextFrame();
    }

    // Update the edit list, if applicable
    // FIXME: Add support for playing backwards videos with more than one edit
    // For now, stay on the first edit for reversed playback
    if (endOfCurEdit() && !_reversed) {
        _curEdit++;

        if (atLastEdit())
            return 0;

        enterNewEditList(true);
    }

    const Graphics::Surface *frame = bufferNextFrame();

    // Advance (or, when reversed, rewind) the start time of the frame
    // that follows this one.
    if (_reversed) {
        if (_durationOverride >= 0) {
            // Use our own duration overridden from a media seek
            _nextFrameStartTime -= _durationOverride;
            _durationOverride = -1;
        } else {
            // Just need to subtract the time
            _nextFrameStartTime -= getFrameDuration();
        }
    } else {
        if (_durationOverride >= 0) {
            // Use our own duration overridden from a media seek
            _nextFrameStartTime += _durationOverride;
            _durationOverride = -1;
        } else {
            _nextFrameStartTime += getFrameDuration();
        }
    }

    // Handle forced dithering
    if (frame && _forcedDitherPalette)
        frame = forceDither(*frame);

    // Apply the track's scale factor, creating the target surface on
    // first use.
    if (frame && (_parent->scaleFactorX != 1 || _parent->scaleFactorY != 1)) {
        if (!_scaledSurface) {
            _scaledSurface = new Graphics::Surface();
            _scaledSurface->create(getScaledWidth().toInt(), getScaledHeight().toInt(), getPixelFormat());
        }

        _decoder->scaleSurface(frame, _scaledSurface, _parent->scaleFactorX, _parent->scaleFactorY);
        return _scaledSurface;
    }

    return frame;
}
00540 
// Return the active palette (a forced dither palette takes priority over
// the codec/file palette) and clear the dirty flag as a side effect.
const byte *QuickTimeDecoder::VideoTrackHandler::getPalette() const {
    _dirtyPalette = false;
    return _forcedDitherPalette ? _forcedDitherPalette : _curPalette;
}
00545 
// Switch between forward and reversed playback. Returns false when
// reversed playback is not supported for this track (more than one
// edit). _curFrame/_nextFrameStartTime are repositioned so the next
// decodeNextFrame() call yields the correct neighboring frame.
bool QuickTimeDecoder::VideoTrackHandler::setReverse(bool reverse) {
    _reversed = reverse;

    if (_reversed) {
        if (_parent->editList.size() != 1) {
            // TODO: Myst's holo.mov needs this :(
            warning("Can only set reverse without edits");
            return false;
        }

        if (atLastEdit()) {
            // If we're at the end of the video, go to the penultimate edit.
            // The current frame is set to one beyond the last frame here;
            // one "past" the currently displayed frame.
            _curEdit = _parent->editList.size() - 1;
            _curFrame = _parent->frameCount;
            _nextFrameStartTime = _parent->editList[_curEdit].trackDuration + _parent->editList[_curEdit].timeOffset;
        } else if (_durationOverride >= 0) {
            // We just had a media seek, so "pivot" around the frame that should
            // be displayed.
            _curFrame += 2;
            _nextFrameStartTime += _durationOverride;
        } else {
            // We need to put _curFrame to be the one after the one that should be displayed.
            // Since we're on the frame that should be displaying right now, add one.
            _curFrame++;
        }
    } else {
        // Update the edit list, if applicable
        if (!atLastEdit() && endOfCurEdit()) {
            _curEdit++;

            if (atLastEdit())
                return true;
        }

        if (_durationOverride >= 0) {
            // We just had a media seek, so "pivot" around the frame that should
            // be displayed.
            _curFrame--;
            _nextFrameStartTime -= _durationOverride;
        }

        // We need to put _curFrame to be the one before the one that should be displayed.
        // Since we're on the frame that should be displaying right now, subtract one.
        // (As long as the current frame isn't -1, of course)
        if (_curFrame > 0) {
            // We then need to handle the keyframe situation
            int targetFrame = _curFrame - 1;
            _curFrame = findKeyFrame(targetFrame) - 1;
            while (_curFrame < targetFrame)
                bufferNextFrame();
        } else if (_curFrame == 0) {
            // Make us start at the first frame (no keyframe needed)
            _curFrame--;
        }
    }

    return true;
}
00606 
// Track width divided by its horizontal scale factor, as an exact rational.
Common::Rational QuickTimeDecoder::VideoTrackHandler::getScaledWidth() const {
    return Common::Rational(_parent->width) / _parent->scaleFactorX;
}
00610 
// Track height divided by its vertical scale factor, as an exact rational.
Common::Rational QuickTimeDecoder::VideoTrackHandler::getScaledHeight() const {
    return Common::Rational(_parent->height) / _parent->scaleFactorY;
}
00614 
// Read the raw packet data for _curFrame from the movie file.
// Walks the sample-to-chunk ('stsc') table to find the chunk containing
// the frame, seeks to it, skips preceding samples, and returns the
// frame's bytes as a new stream (caller owns it). descId is set to the
// 1-based sample description index for the chunk.
Common::SeekableReadStream *QuickTimeDecoder::VideoTrackHandler::getNextFramePacket(uint32 &descId) {
    // First, we have to track down which chunk holds the sample and which sample in the chunk contains the frame we are looking for.
    int32 totalSampleCount = 0;
    int32 sampleInChunk = 0;
    int32 actualChunk = -1;
    uint32 sampleToChunkIndex = 0;

    for (uint32 i = 0; i < _parent->chunkCount; i++) {
        // Advance to the next sample-to-chunk entry once this chunk index
        // reaches its first-chunk threshold.
        if (sampleToChunkIndex < _parent->sampleToChunkCount && i >= _parent->sampleToChunk[sampleToChunkIndex].first)
            sampleToChunkIndex++;

        totalSampleCount += _parent->sampleToChunk[sampleToChunkIndex - 1].count;

        if (totalSampleCount > _curFrame) {
            actualChunk = i;
            descId = _parent->sampleToChunk[sampleToChunkIndex - 1].id;
            sampleInChunk = _parent->sampleToChunk[sampleToChunkIndex - 1].count - totalSampleCount + _curFrame;
            break;
        }
    }

    if (actualChunk < 0)
        error("Could not find data for frame %d", _curFrame);

    // Next seek to that frame
    Common::SeekableReadStream *stream = _decoder->_fd;
    stream->seek(_parent->chunkOffsets[actualChunk]);

    // Then, if the chunk holds more than one frame, seek to where the frame we want is located
    for (int32 i = _curFrame - sampleInChunk; i < _curFrame; i++) {
        // sampleSize != 0 means all samples share one fixed size;
        // otherwise each sample has its own entry in sampleSizes.
        if (_parent->sampleSize != 0)
            stream->skip(_parent->sampleSize);
        else
            stream->skip(_parent->sampleSizes[i]);
    }

    // Finally, read in the raw data for the frame
    //debug("Frame Data[%d]: Offset = %d, Size = %d", _curFrame, stream->pos(), _parent->sampleSizes[_curFrame]);

    if (_parent->sampleSize != 0)
        return stream->readStream(_parent->sampleSize);

    return stream->readStream(_parent->sampleSizes[_curFrame]);
}
00659 
// Duration of the current frame (_curFrame) in media time-scale units,
// looked up from the time-to-sample ('stts') table.
uint32 QuickTimeDecoder::VideoTrackHandler::getFrameDuration() {
    uint32 curFrameIndex = 0;
    for (int32 i = 0; i < _parent->timeToSampleCount; i++) {
        curFrameIndex += _parent->timeToSample[i].count;
        if ((uint32)_curFrame < curFrameIndex) {
            // Ok, now we have what duration this frame has.
            return _parent->timeToSample[i].duration;
        }
    }

    // This should never occur
    error("Cannot find duration for frame %d", _curFrame);
    return 0;
}
00674 
00675 uint32 QuickTimeDecoder::VideoTrackHandler::findKeyFrame(uint32 frame) const {
00676     for (int i = _parent->keyframeCount - 1; i >= 0; i--)
00677         if (_parent->keyframes[i] <= frame)
00678             return _parent->keyframes[i];
00679 
00680     // If none found, we'll assume the requested frame is a key frame
00681     return frame;
00682 }
00683 
// Position playback at the start of the edit _curEdit points to (skipping
// empty edits), mapping the edit's media start time to a frame number.
// When bufferFrames is true, frames from the preceding keyframe up to the
// target are decoded so the next bufferNextFrame() yields a valid image.
void QuickTimeDecoder::VideoTrackHandler::enterNewEditList(bool bufferFrames) {
    // Bypass all empty edit lists first
    while (!atLastEdit() && _parent->editList[_curEdit].mediaTime == -1)
        _curEdit++;

    if (atLastEdit())
        return;

    uint32 mediaTime = _parent->editList[_curEdit].mediaTime;
    uint32 frameNum = 0;
    uint32 totalDuration = 0;
    _durationOverride = -1;

    // Track down where the mediaTime is in the media
    // This is basically time -> frame mapping
    // Note that this code uses first frame = 0
    for (int32 i = 0; i < _parent->timeToSampleCount; i++) {
        uint32 duration = _parent->timeToSample[i].count * _parent->timeToSample[i].duration;

        if (totalDuration + duration >= mediaTime) {
            // mediaTime falls inside this time-to-sample run; advance by
            // whole frames within it.
            uint32 frameInc = (mediaTime - totalDuration) / _parent->timeToSample[i].duration;
            frameNum += frameInc;
            totalDuration += frameInc * _parent->timeToSample[i].duration;

            // If we didn't get to the exact media time, mark an override for
            // the time.
            if (totalDuration != mediaTime)
                _durationOverride = totalDuration + _parent->timeToSample[i].duration - mediaTime;

            break;
        }

        frameNum += _parent->timeToSample[i].count;
        totalDuration += duration;
    }

    if (bufferFrames) {
        // Track down the keyframe
        // Then decode until the frame before target
        _curFrame = findKeyFrame(frameNum) - 1;
        while (_curFrame < (int32)frameNum - 1)
            bufferNextFrame();
    } else {
        // Since frameNum is the frame that needs to be displayed
        // we'll set _curFrame to be the "last frame displayed"
        _curFrame = frameNum - 1;
    }

    _nextFrameStartTime = getCurEditTimeOffset();
}
00734 
// Advance _curFrame by one and decode that frame's packet with the codec
// of its sample description, updating the current palette as needed.
// Returns 0 when the packet or codec is missing/invalid.
const Graphics::Surface *QuickTimeDecoder::VideoTrackHandler::bufferNextFrame() {
    _curFrame++;

    // Get the next packet
    uint32 descId;
    Common::SeekableReadStream *frameData = getNextFramePacket(descId);

    // descId is 1-based; 0 or out-of-range means a corrupt/unknown entry.
    if (!frameData || !descId || descId > _parent->sampleDescs.size()) {
        delete frameData;
        return 0;
    }

    // Find which video description entry we want
    VideoSampleDesc *entry = (VideoSampleDesc *)_parent->sampleDescs[descId - 1];

    if (!entry->_videoCodec) {
        delete frameData;
        return 0;
    }

    const Graphics::Surface *frame = entry->_videoCodec->decodeFrame(*frameData);
    delete frameData;

    // Update the palette
    if (entry->_videoCodec->containsPalette()) {
        // The codec itself contains a palette
        if (entry->_videoCodec->hasDirtyPalette()) {
            _curPalette = entry->_videoCodec->getPalette();
            _dirtyPalette = true;
        }
    } else {
        // Check if the video description has been updated
        byte *palette = entry->_palette;

        if (palette != _curPalette) {
            _curPalette = palette;
            _dirtyPalette = true;
        }
    }

    return frame;
}
00777 
// Convert _nextFrameStartTime into track time with the current edit's
// media rate applied, rounding to the nearest unit.
uint32 QuickTimeDecoder::VideoTrackHandler::getRateAdjustedFrameTime() const {
    // Figure out what time the next frame is at taking the edit list rate into account
    Common::Rational offsetFromEdit = Common::Rational(_nextFrameStartTime - getCurEditTimeOffset()) / _parent->editList[_curEdit].mediaRate;
    uint32 convertedTime = offsetFromEdit.toInt();

    // Round up when the fractional part exceeds one half.
    if ((offsetFromEdit.getNumerator() % offsetFromEdit.getDenominator()) > (offsetFromEdit.getDenominator() / 2))
        convertedTime++;

    return convertedTime + getCurEditTimeOffset();
}
00788 
00789 uint32 QuickTimeDecoder::VideoTrackHandler::getCurEditTimeOffset() const {
00790     // Need to convert to the track scale
00791 
00792     // We have to round the time off to the nearest in the scale, otherwise
00793     // bad things happen. QuickTime docs are pretty silent on all this stuff,
00794     // so this was found from samples. It doesn't help that this is really
00795     // the only open source implementation of QuickTime edits.
00796 
00797     uint32 mult = _parent->editList[_curEdit].timeOffset * _parent->timeScale;
00798     uint32 result = mult / _decoder->_timeScale;
00799 
00800     if ((mult % _decoder->_timeScale) > (_decoder->_timeScale / 2))
00801         result++;
00802 
00803     return result;
00804 }
00805 
00806 uint32 QuickTimeDecoder::VideoTrackHandler::getCurEditTrackDuration() const {
00807     // Need to convert to the track scale
00808     return _parent->editList[_curEdit].trackDuration * _parent->timeScale / _decoder->_timeScale;
00809 }
00810 
00811 bool QuickTimeDecoder::VideoTrackHandler::atLastEdit() const {
00812     return _curEdit == _parent->editList.size();
00813 }
00814 
00815 bool QuickTimeDecoder::VideoTrackHandler::endOfCurEdit() const {
00816     // We're at the end of the edit once the next frame's time would
00817     // bring us past the end of the edit.
00818     return getRateAdjustedFrameTime() >= getCurEditTimeOffset() + getCurEditTrackDuration();
00819 }
00820 
00821 bool QuickTimeDecoder::VideoTrackHandler::canDither() const {
00822     for (uint i = 0; i < _parent->sampleDescs.size(); i++) {
00823         VideoSampleDesc *desc = (VideoSampleDesc *)_parent->sampleDescs[i];
00824 
00825         if (!desc || !desc->_videoCodec)
00826             return false;
00827     }
00828 
00829     return true;
00830 }
00831 
00832 void QuickTimeDecoder::VideoTrackHandler::setDither(const byte *palette) {
00833     assert(canDither());
00834 
00835     for (uint i = 0; i < _parent->sampleDescs.size(); i++) {
00836         VideoSampleDesc *desc = (VideoSampleDesc *)_parent->sampleDescs[i];
00837 
00838         if (desc->_videoCodec->canDither(Image::Codec::kDitherTypeQT)) {
00839             // Codec dither
00840             desc->_videoCodec->setDither(Image::Codec::kDitherTypeQT, palette);
00841         } else {
00842             // Forced dither
00843             _forcedDitherPalette = new byte[256 * 3];
00844             memcpy(_forcedDitherPalette, palette, 256 * 3);
00845             _ditherTable = Image::Codec::createQuickTimeDitherTable(_forcedDitherPalette, 256);
00846             _dirtyPalette = true;
00847         }
00848     }
00849 }
00850 
00851 namespace {
00852 
00853 // Return a pixel in RGB554
00854 uint16 makeDitherColor(byte r, byte g, byte b) {
00855     return ((r & 0xF8) << 6) | ((g & 0xF8) << 1) | (b >> 4);
00856 }
00857 
00858 // Default template to convert a dither color
00859 template<typename PixelInt>
00860 inline uint16 readDitherColor(PixelInt srcColor, const Graphics::PixelFormat& format, const byte *palette) {
00861     byte r, g, b;
00862     format.colorToRGB(srcColor, r, g, b);
00863     return makeDitherColor(r, g, b);
00864 }
00865 
00866 // Specialized version for 8bpp
00867 template<>
00868 inline uint16 readDitherColor(byte srcColor, const Graphics::PixelFormat& format, const byte *palette) {
00869     return makeDitherColor(palette[srcColor * 3], palette[srcColor * 3 + 1], palette[srcColor * 3 + 2]);
00870 }
00871 
00872 template<typename PixelInt>
00873 void ditherFrame(const Graphics::Surface &src, Graphics::Surface &dst, const byte *ditherTable, const byte *palette = 0) {
00874     static const uint16 colorTableOffsets[] = { 0x0000, 0xC000, 0x4000, 0x8000 };
00875 
00876     for (int y = 0; y < dst.h; y++) {
00877         const PixelInt *srcPtr = (const PixelInt *)src.getBasePtr(0, y);
00878         byte *dstPtr = (byte *)dst.getBasePtr(0, y);
00879         uint16 colorTableOffset = colorTableOffsets[y & 3];
00880 
00881         for (int x = 0; x < dst.w; x++) {
00882             uint16 color = readDitherColor(*srcPtr++, src.format, palette);
00883             *dstPtr++ = ditherTable[colorTableOffset + color];
00884             colorTableOffset += 0x4000;
00885         }
00886     }
00887 }
00888 
00889 } // End of anonymous namespace
00890 
00891 const Graphics::Surface *QuickTimeDecoder::VideoTrackHandler::forceDither(const Graphics::Surface &frame) {
00892     if (frame.format.bytesPerPixel == 1) {
00893         // This should always be true, but this is for sanity
00894         if (!_curPalette)
00895             return &frame;
00896 
00897         // If the palettes match, bail out
00898         if (memcmp(_forcedDitherPalette, _curPalette, 256 * 3) == 0)
00899             return &frame;
00900     }
00901 
00902     // Need to create a new one
00903     if (!_ditherFrame) {
00904         _ditherFrame = new Graphics::Surface();
00905         _ditherFrame->create(frame.w, frame.h, Graphics::PixelFormat::createFormatCLUT8());
00906     }
00907 
00908     if (frame.format.bytesPerPixel == 1)
00909         ditherFrame<byte>(frame, *_ditherFrame, _ditherTable, _curPalette);
00910     else if (frame.format.bytesPerPixel == 2)
00911         ditherFrame<uint16>(frame, *_ditherFrame, _ditherTable);
00912     else if (frame.format.bytesPerPixel == 4)
00913         ditherFrame<uint32>(frame, *_ditherFrame, _ditherTable);
00914 
00915     return _ditherFrame;
00916 }
00917 
00918 } // End of namespace Video


Generated on Sat Mar 16 2019 05:01:50 for ResidualVM by doxygen 1.7.1
curved edge   curved edge