NV12 to ARGB color space conversion (CUDA kernel)


/*
    NV12ToARGB color space conversion CUDA kernel

    This sample uses CUDA to perform a simple NV12 (YUV 4:2:0 planar)
    source and converts to output in ARGB format
*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "cudaProcessFrame.h"

// Alpha value with the alpha byte already shifted into bits 24..31 (e.g. 0xff000000)
__constant__ uint32 constAlpha;

#define MUL(x,y)    ((x)*(y))

// 3x3 YUV-to-RGB conversion matrix (row-major: rows R, G, B; columns Y, Cb, Cr),
// filled in by the host, typically BT.601/BT.709 coefficients with an optional hue rotation applied
__constant__ float  constHueColorSpaceMat[9];

__device__ void YUV2RGB(uint32 *yuvi, float *red, float *green, float *blue)
{

    float luma, chromaCb, chromaCr;

    // Prepare for hue adjustment; 512 is the chroma midpoint (128 << 2) in the 10-bit intermediate range
    luma     = (float)yuvi[0];
    chromaCb = (float)((int32)yuvi[1] - 512.0f);
    chromaCr = (float)((int32)yuvi[2] - 512.0f);

    // Convert YUV To RGB with hue adjustment
    *red  = MUL(luma,     constHueColorSpaceMat[0]) +
            MUL(chromaCb, constHueColorSpaceMat[1]) +
            MUL(chromaCr, constHueColorSpaceMat[2]);
    *green= MUL(luma,     constHueColorSpaceMat[3]) +
            MUL(chromaCb, constHueColorSpaceMat[4]) +
            MUL(chromaCr, constHueColorSpaceMat[5]);
    *blue = MUL(luma,     constHueColorSpaceMat[6]) +
            MUL(chromaCb, constHueColorSpaceMat[7]) +
            MUL(chromaCr, constHueColorSpaceMat[8]);
}

__device__ uint32 RGBAPACK_8bit(float red, float green, float blue, uint32 alpha)
{

    uint32 ARGBpixel = 0;

    // Clamp final 8 bit results
    red   = min(max(red,   0.0f), 255.0f);
    green = min(max(green, 0.0f), 255.0f);
    blue  = min(max(blue,  0.0f), 255.0f);

    // Convert to 8 bit unsigned integers per color component
    ARGBpixel = (((uint32)blue) |
                 (((uint32)green) << 8)  |
                 (((uint32)red) << 16) | (uint32)alpha);

    return  ARGBpixel;
}

__device__ uint32 RGBAPACK_10bit(float red, float green, float blue, uint32 alpha)
{

    uint32 ARGBpixel = 0;

    // Clamp final 10 bit results
    red   = min(max(red,   0.0f), 1023.f);
    green = min(max(green, 0.0f), 1023.f);
    blue  = min(max(blue,  0.0f), 1023.f);

    // Drop the low 2 bits to convert each 10-bit component to 8 bits
    ARGBpixel = (((uint32)blue  >> 2) |
                 (((uint32)green >> 2) << 8)  |
                 (((uint32)red   >> 2) << 16) | (uint32)alpha);

    return  ARGBpixel;
}

// CUDA kernel for outputting the final ARGB output from NV12
extern "C"
__global__ void Passthru_drvapi(uint32 *srcImage,   size_t nSourcePitch,
                                uint32 *dstImage,   size_t nDestPitch,
                                uint32 width,       uint32 height)
{

    int32 x, y;
    uint32 yuv101010Pel[2];
    uint32 processingPitch = ((width) + 63) & ~63;
    uint32 dstImagePitch   = nDestPitch >> 2;      // destination pitch in 32-bit ARGB pixels
    uint8 *srcImageU8      = (uint8 *)srcImage;

    // Use the actual source pitch supplied by the caller rather than the aligned estimate above
    processingPitch = nSourcePitch;

    // Each thread processes 2 horizontally adjacent pixels, hence the multiply by 2 in x
    x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
    y = blockIdx.y *  blockDim.y       +  threadIdx.y;

    if (x >= width)
        return; // x = width - 1;

    if (y >= height)
        return; // y = height - 1;

    // Read 2 luma components at a time, so we don't waste processing since CbCr are decimated this way.
    // If we move to texture we could read 4 luminance values.
    yuv101010Pel[0] = (srcImageU8[y * processingPitch + x    ]);
    yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]);

    // Extract the 8-bit luma for each of the two pixels
    float luma[2];

    luma[0]   = (yuv101010Pel[0]        & 0x00FF);
    luma[1]   = (yuv101010Pel[1]        & 0x00FF);

    // Clamp and pack the results into grayscale ARGB
    dstImage[y * dstImagePitch + x     ] = RGBAPACK_8bit(luma[0], luma[0], luma[0], constAlpha);
    dstImage[y * dstImagePitch + x + 1 ] = RGBAPACK_8bit(luma[1], luma[1], luma[1], constAlpha);
}

// CUDA kernel for outputting the final ARGB output from NV12
extern "C"
__global__ void NV12ToARGB_drvapi(uint32 *srcImage,     size_t nSourcePitch,
                                  uint32 *dstImage,     size_t nDestPitch,
                                  uint32 width,         uint32 height)
{

    int32 x, y;
    uint32 yuv101010Pel[2];
    uint32 processingPitch = ((width) + 63) & ~63;
    uint32 dstImagePitch   = nDestPitch >> 2;      // destination pitch in 32-bit ARGB pixels
    uint8 *srcImageU8      = (uint8 *)srcImage;

    // Use the actual source pitch supplied by the caller rather than the aligned estimate above
    processingPitch = nSourcePitch;

    // Each thread processes 2 horizontally adjacent pixels, hence the multiply by 2 in x
    x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
    y = blockIdx.y *  blockDim.y       +  threadIdx.y;

    if (x >= width)
        return; // x = width - 1;

    if (y >= height)
        return; // y = height - 1;

    // Read 2 luma components at a time, so we don't waste processing since CbCr are decimated this way.
    // If we move to texture we could read 4 luminance values.
    // The 8-bit luma samples are shifted left by 2 to enter the 10-bit intermediate range.
    yuv101010Pel[0] = (srcImageU8[y * processingPitch + x    ]) << 2;
    yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2;

    uint32 chromaOffset    = processingPitch * height;   // NV12: the interleaved CbCr plane starts right after the luma plane
    int32 y_chroma = y >> 1;                              // chroma is vertically subsampled by 2

    if (y & 1)  // odd scanline ?
    {

        uint32 chromaCb;
        uint32 chromaCr;

        chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x    ];
        chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1];

        if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically
        {

            chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x    ] + 1) >> 1;
            chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1;
        }

        yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE       + 2));
        yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));

        yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE       + 2));
        yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
    }
    else
    {

        yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x    ] << (COLOR_COMPONENT_BIT_SIZE       + 2));
        yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));

        yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x    ] << (COLOR_COMPONENT_BIT_SIZE       + 2));
        yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
    }

    // This step performs the color conversion
    uint32 yuvi[6];
    float red[2], green[2], blue[2];

    yuvi[0] = (yuv101010Pel[0] &   COLOR_COMPONENT_MASK);
    yuvi[1] = ((yuv101010Pel[0] >>  COLOR_COMPONENT_BIT_SIZE)       & COLOR_COMPONENT_MASK);
    yuvi[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);

    yuvi[3] = (yuv101010Pel[1] &   COLOR_COMPONENT_MASK);
    yuvi[4] = ((yuv101010Pel[1] >>  COLOR_COMPONENT_BIT_SIZE)       & COLOR_COMPONENT_MASK);
    yuvi[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);

    // YUV to RGB transformation
    YUV2RGB(&yuvi[0], &red[0], &green[0], &blue[0]);
    YUV2RGB(&yuvi[3], &red[1], &green[1], &blue[1]);

    // Clamp and pack the results into ARGB
    dstImage[y * dstImagePitch + x     ] = RGBAPACK_10bit(red[0], green[0], blue[0], constAlpha);
    dstImage[y * dstImagePitch + x + 1 ] = RGBAPACK_10bit(red[1], green[1], blue[1], constAlpha);
}
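
To make the listing easier to try out, here is a minimal host-side sketch of how these kernels could be driven. Everything in it is an assumption for illustration: the helper name convertNV12ToARGB, the 32x8 block size, and the BT.601-style coefficients in kYuv2RgbMat are hypothetical, and the original sample actually launches the kernels through the CUDA Driver API (hence the _drvapi suffix) and builds the color matrix on the host with hue-rotation support. The sketch assumes it is compiled by nvcc in the same translation unit as the kernels above, so the __constant__ symbols and the <<<...>>> launch syntax are directly visible.

#include <cuda_runtime.h>

// Illustrative BT.601-style coefficients (rows R, G, B; columns Y, Cb, Cr).
// Placeholder values only: the real sample computes this matrix on the host.
static const float kYuv2RgbMat[9] =
{
    1.1644f,  0.0000f,  1.5960f,   // R = 1.1644*Y             + 1.5960*Cr
    1.1644f, -0.3918f, -0.8130f,   // G = 1.1644*Y - 0.3918*Cb - 0.8130*Cr
    1.1644f,  2.0172f,  0.0000f    // B = 1.1644*Y + 2.0172*Cb
};

// Hypothetical helper: converts one NV12 frame in device memory to ARGB.
void convertNV12ToARGB(uint8 *dNV12, size_t srcPitch,
                       uint32 *dARGB, size_t dstPitchBytes,
                       uint32 width, uint32 height)
{
    // constAlpha is OR'ed into the packed pixel without shifting,
    // so it must already carry the alpha byte in bits 24..31.
    uint32 alpha = 0xff000000;
    cudaMemcpyToSymbol(constAlpha, &alpha, sizeof(alpha));
    cudaMemcpyToSymbol(constHueColorSpaceMat, kYuv2RgbMat, sizeof(kYuv2RgbMat));

    // Each thread handles 2 horizontally adjacent pixels (blockDim.x << 1 in the kernel),
    // so the grid needs only half as many threads in x as there are pixels.
    dim3 block(32, 8);
    dim3 grid((width  + block.x * 2 - 1) / (block.x * 2),
              (height + block.y     - 1) /  block.y);

    NV12ToARGB_drvapi<<<grid, block>>>((uint32 *)dNV12, srcPitch,
                                       dARGB, dstPitchBytes,
                                       width, height);
}

The grid math mirrors the kernel's own indexing (x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1)); srcPitch and dstPitchBytes are pitches in bytes, typically obtained from cudaMallocPitch or from whatever decoder produced the NV12 surface.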
