
sifutang / ffmpeg-demo

36 stars · 3 watchers · 9 forks · 74.56 MB

An Android playback demo based on FFmpeg 6.0, including basic import and export features.

License: Apache License 2.0

Kotlin 4.37% CMake 0.67% C++ 2.87% C 70.11% GLSL 0.07% Java 0.30% Makefile 3.15% Assembly 0.19% Shell 10.19% M4 0.45% DIGITAL Command Language 0.25% Dockerfile 0.02% Batchfile 0.08% Roff 6.24% Awk 0.99% Module Management System 0.05%
android ffmpeg camerax mediacodec opengl libpng libjpeg-turbo

ffmpeg-demo's People

Contributors

sifutang


ffmpeg-demo's Issues

Data taken directly from frame->data may be wrong

void FFMpegPlayer::doRender(JNIEnv *env, AVFrame *avFrame) {
    if (avFrame->format == AV_PIX_FMT_YUV420P) {
        if (!avFrame->data[0] || !avFrame->data[1] || !avFrame->data[2]) {
            LOGE("doRender failed, no yuv buffer")
            return;
        }

        int ySize = avFrame->width * avFrame->height;
        auto y = env->NewByteArray(ySize);
        env->SetByteArrayRegion(y, 0, ySize, reinterpret_cast<const jbyte *>(avFrame->data[0]));
        auto u = env->NewByteArray(ySize / 4);
        env->SetByteArrayRegion(u, 0, ySize / 4, reinterpret_cast<const jbyte *>(avFrame->data[1]));
        auto v = env->NewByteArray(ySize / 4);
        env->SetByteArrayRegion(v, 0, ySize / 4, reinterpret_cast<const jbyte *>(avFrame->data[2]));

        if (mPlayerJni.isValid()) {
            env->CallVoidMethod(mPlayerJni.instance, mPlayerJni.onVideoFrameArrived,
                                avFrame->width, avFrame->height, FMT_VIDEO_YUV420, y, u, v);
        }

        env->DeleteLocalRef(y);
        env->DeleteLocalRef(u);
        env->DeleteLocalRef(v);
    } else if (avFrame->format == AV_PIX_FMT_NV12) {
        if (!avFrame->data[0] || !avFrame->data[1]) {
            LOGE("doRender failed, no nv21 buffer")
            return;
        }

        int ySize = avFrame->width * avFrame->height;
        auto y = env->NewByteArray(ySize);
        env->SetByteArrayRegion(y, 0, ySize, reinterpret_cast<const jbyte *>(avFrame->data[0]));
        auto uv = env->NewByteArray(ySize / 2);
        env->SetByteArrayRegion(uv, 0, ySize / 2, reinterpret_cast<const jbyte *>(avFrame->data[1]));

        if (mPlayerJni.isValid()) {
            env->CallVoidMethod(mPlayerJni.instance, mPlayerJni.onVideoFrameArrived,
                                avFrame->width, avFrame->height, FMT_VIDEO_NV12, y, uv, nullptr);
        }

        env->DeleteLocalRef(y);
        env->DeleteLocalRef(uv);
    } else if (avFrame->format == AV_PIX_FMT_RGBA) {
        if (!avFrame->data[0]) {
            LOGE("doRender failed, no rgba buffer")
            return;
        }

        int size = avFrame->width * avFrame->height * 4;
        auto rgba = env->NewByteArray(size);
        env->SetByteArrayRegion(rgba, 0, size, reinterpret_cast<const jbyte *>(avFrame->data[0]));

        if (mPlayerJni.isValid()) {
            env->CallVoidMethod(mPlayerJni.instance, mPlayerJni.onVideoFrameArrived,
                                avFrame->width, avFrame->height, FMT_VIDEO_RGBA, rgba, nullptr, nullptr);
        }

        env->DeleteLocalRef(rgba);
    } else if (avFrame->format == AV_PIX_FMT_MEDIACODEC) {
        av_mediacodec_release_buffer((AVMediaCodecBuffer *)avFrame->data[3], 1);
    } else if (avFrame->format == AV_SAMPLE_FMT_FLTP) {
        int dataSize = mAudioDecoder->mDataSize;
        bool flushRender = mAudioDecoder->mNeedFlushRender;
        if (dataSize > 0) {
            uint8_t *audioBuffer = mAudioDecoder->mAudioBuffer;
            if (mIsMute) {
                memset(audioBuffer, 0, dataSize);
            }
            auto jByteArray = env->NewByteArray(dataSize);
            env->SetByteArrayRegion(jByteArray, 0, dataSize, reinterpret_cast<const jbyte *>(audioBuffer));

            if (mPlayerJni.isValid()) {
                env->CallVoidMethod(mPlayerJni.instance, mPlayerJni.onAudioFrameArrived, jByteArray, dataSize, flushRender);
            }
            env->DeleteLocalRef(jByteArray);
        }
    }
}

Your method reads the decoded picture data directly from frame->data without taking linesize into account. linesize is the size in bytes of one row of the picture buffer; taking the Y plane as an example, it is usually greater than or equal to image_width. When linesize equals image_width your approach works, but when linesize is greater than image_width it is wrong: in that case the data has to be read row by row, skipping the linesize - image_width padding pixels on the right edge of each row.
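
To make the padding concrete, here is a small illustration in C++ (the 1080p dimensions and the 2048-byte stride are hypothetical example values, not taken from this demo):

// Hypothetical 1920x1080 YUV420P frame: decoders often pad each row of a
// plane for alignment, so frame->linesize[0] can be larger than the width.
const int width     = 1920;
const int height    = 1080;             // 1080 visible rows
const int yLinesize = 2048;             // example padded stride (> width)
const int ySize     = width * height;   // bytes the Java side expects

// Bytes actually laid out for the Y plane in frame->data[0]:
const int storedBytes = yLinesize * height;   // 2211840
// Rows covered when ySize bytes are copied straight from data[0]:
const int rowsCopied  = ySize / yLinesize;    // 1012 of 1080 rows, so the
                                              // picture shears and the bottom
                                              // rows are lost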

Here is my reference code:

void copyFrameData(uint8_t * dst, uint8_t * src, int image_width, int image_height, int line_stride, int pixel_stride) {
    if ((image_width * pixel_stride) >= line_stride) {
        memcpy(dst, src, image_width * image_height * pixel_stride);
    } else {
        uint8_t *dst_offset = dst;
        uint8_t *src_offset = src;
        int image_line_len = image_width * pixel_stride;
        for (int i = 0; i < image_height; i ++) {
            memcpy(dst_offset, src_offset, image_line_len);
            dst_offset += image_line_len;
            src_offset += line_stride;
        }
    }
}
// ...
// yuv420p
uint8_t *yBuffer = videoBuffer->yBuffer;
uint8_t *uBuffer = videoBuffer->uBuffer;
uint8_t *vBuffer = videoBuffer->vBuffer;
copyFrameData(yBuffer, frame->data[0], w, h, frame->linesize[0], 1);
copyFrameData(uBuffer, frame->data[1], w / 2, h / 2, frame->linesize[1], 1);
copyFrameData(vBuffer, frame->data[2], w / 2, h / 2, frame->linesize[2], 1);
//...

// ...
// yuv420sp
uint8_t *yBuffer = videoBuffer->yBuffer;
uint8_t *uvBuffer = videoBuffer->uvBuffer;
copyFrameData(yBuffer, frame->data[0], w, h, frame->linesize[0], 1);
copyFrameData(uvBuffer, frame->data[1], w / 2, h / 2, frame->linesize[1], 2);
// ...

// ...
// rgba
uint8_t *rgbaBuffer = videoBuffer->rgbaBuffer;
copyFrameData(rgbaBuffer, frame->data[0], w, h, frame->linesize[0], 4);
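
For reference, here is a minimal sketch (buffer names are illustrative, not code from this repo) of how the YUV420P branch of doRender could apply the copyFrameData helper above before handing the planes to Java. Alternatively, av_image_copy_to_buffer() from libavutil/imgutils.h packs all planes into one contiguous buffer while skipping the linesize padding.

// Sketch only: pack each plane into a tightly packed temporary buffer with
// copyFrameData(), then pass the packed data to the existing JNI callback.
// Requires #include <vector>.
int w = avFrame->width;
int h = avFrame->height;
int ySize = w * h;

std::vector<uint8_t> yBuf(ySize), uBuf(ySize / 4), vBuf(ySize / 4);
copyFrameData(yBuf.data(), avFrame->data[0], w,     h,     avFrame->linesize[0], 1);
copyFrameData(uBuf.data(), avFrame->data[1], w / 2, h / 2, avFrame->linesize[1], 1);
copyFrameData(vBuf.data(), avFrame->data[2], w / 2, h / 2, avFrame->linesize[2], 1);

auto y = env->NewByteArray(ySize);
env->SetByteArrayRegion(y, 0, ySize, reinterpret_cast<const jbyte *>(yBuf.data()));
auto u = env->NewByteArray(ySize / 4);
env->SetByteArrayRegion(u, 0, ySize / 4, reinterpret_cast<const jbyte *>(uBuf.data()));
auto v = env->NewByteArray(ySize / 4);
env->SetByteArrayRegion(v, 0, ySize / 4, reinterpret_cast<const jbyte *>(vBuf.data()));
// ... CallVoidMethod / DeleteLocalRef exactly as in the original doRender ...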
