Input Assembler - Vertex Shader linkage error

I'm new to HLSL and have been struggling with this problem for a while and I can't figure it out. I'm getting the error below a number of times in the debug window:

D3D11 ERROR: ID3D10Device::DrawIndexed: Input Assembler - Vertex Shader linkage error: Signatures between stages are incompatible. Semantic 'TEXCOORD' is defined for mismatched hardware registers between the output stage and input stage. [ EXECUTION ERROR #343: DEVICE_SHADER_LINKAGE_REGISTERINDEX]
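From what I've read, that error means the pixel shader's input signature doesn't line up with the vertex shader's output signature, i.e. both stages have to declare the same semantics in the same registers. A minimal sketch of what I think "matched" should look like (illustrative only, not my actual code, names made up):

//Vertex shader output and pixel shader input use the identical struct,
//so the TEXCOORD written by the VS lands in the register the PS reads
struct SKETCH_OUTPUT
{
    float4 ProjPos : SV_POSITION;
    float2 UV      : TEXCOORD0;
};

SKETCH_OUTPUT SketchVS(float3 pos : POSITION, float2 uv : TEXCOORD0)
{
    SKETCH_OUTPUT vOut;
    vOut.ProjPos = float4(pos, 1.0f); // world/view/projection transforms omitted for brevity
    vOut.UV = uv;
    return vOut;
}

float4 SketchPS(SKETCH_OUTPUT pIn) : SV_Target
{
    return float4(pIn.UV, 0.0f, 1.0f);
}

Here is the shader code I'm using: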
//Vertex shader input
struct VS_NORMALMAP_INPUT
{
float3 Pos     : POSITION;
float3 Normal  : NORMAL;
float3 Tangent : TANGENT;
float2 UV      : TEXCOORD0;
};
 
//Vertex shader output
struct VS_LIGHTING_OUTPUT
{
float4 ProjPos       : SV_POSITION;  // 2D "projected" position for vertex (required output for vertex shader)
float3 WorldPos : POSITION;
float3 WorldNormal : NORMAL;
float3 Tangent : TANGENT;
float2 UV            : TEXCOORD;
};
 
//Vertex shader
VS_BASIC_OUTPUT VS_PlainTexture(VS_BASIC_INPUT vIn)
{
VS_BASIC_OUTPUT vOut;
 
 float4 modelPos = float4(vIn.Pos, 1.0f);
float4 worldPos = mul(modelPos, WorldMatrix);
float4 viewPos = mul(worldPos, ViewMatrix);
vOut.ProjPos = mul(viewPos, ProjMatrix);
 
vOut.UV = vIn.UV;
 
return vOut;
}
 
 //Pixel Shader
float4 ShadowMapTex(VS_LIGHTING_OUTPUT vOut) : SV_Target
{
float3 modelNormal = normalize(vOut.WorldNormal);
float3 modelTangent = normalize(vOut.Tangent);
 
float3 modelBiTangent = cross(modelNormal, modelTangent);
float3x3 invTangentMatrix = float3x3(modelTangent, modelBiTangent, modelNormal);
 
float3 CameraDir = normalize(CameraPos - vOut.WorldPos.xyz);
 
float3x3 invWorldMatrix = transpose(WorldMatrix);
float3 cameraModelDir = normalize(mul(CameraDir, invWorldMatrix));
 
float3x3 tangentMatrix = transpose(invTangentMatrix);
float2 textureOffsetDir = mul(cameraModelDir, tangentMatrix);
 
float texDepth = ParallaxDepth * (NormalMap.Sample(Trilinear, vOut.UV).a - 0.5f);
 
float2 offsetTexCoord = vOut.UV + texDepth * textureOffsetDir;
 
float3 textureNormal = 2.0f * NormalMap.Sample(Trilinear, offsetTexCoord) - 1.0f;
 
float3 worldNormal = normalize(mul(mul(textureNormal, invTangentMatrix), WorldMatrix));
 
float3 Light1Dir = normalize(LightPos1 - vOut.WorldPos.xyz);
float3 Light1Dist = length(LightPos1 - vOut.WorldPos.xyz);
float3 DiffuseLight1 = LightColour1 * max(dot(worldNormal.xyz, Light1Dir), 0) / Light1Dist;
float3 halfway = normalize(Light1Dir + CameraDir);
float3 SpecularLight1 = DiffuseLight1 * pow(max(dot(worldNormal.xyz, halfway), 0), SpecularPower);
 
float3 Light2Dir = normalize(LightPos2 - vOut.WorldPos.xyz);
float3 Light2Dist = length(LightPos2 - vOut.WorldPos.xyz);
float3 DiffuseLight2 = LightColour2 * max(dot(worldNormal.xyz, Light2Dir), 0) / Light2Dist;
halfway = normalize(Light2Dir + CameraDir);
float3 SpecularLight2 = DiffuseLight2 * pow(max(dot(worldNormal.xyz, halfway), 0), SpecularPower);
 
float4 SpotlightViewPos = mul(float4(vOut.WorldPos, 1.0f), SpotlightViewMatrix);
float4 SpotlightProjPos = mul(SpotlightViewPos, SpotlightProjMatrix);
 
float3 SpotlightDir = normalize(SpotlightPos - vOut.WorldPos.xyz);
 
if (dot(SpotlightFacing, -SpotlightDir) > SpotlightCosAngle) //**** This condition needs to be written as the first exercise to get spotlights working
{
float2 shadowUV = 0.5f * SpotlightProjPos.xy / SpotlightProjPos.w + float2(0.5f, 0.5f);
shadowUV.y = 1.0f - shadowUV.y;
 
float depthFromLight = SpotlightProjPos.z / SpotlightProjPos.w;// - DepthAdjust; //*** Adjustment so polygons don't shadow themselves
 
if (depthFromLight < ShadowMap1.Sample(PointClamp, shadowUV).r)
{
 
float3 SpotlightDist = length(SpotlightPos - vOut.WorldPos.xyz);
diffuseLight3 = SpotlightColour * max(dot(worldNormal.xyz, SpotlightDir), 0) / SpotlightDist;
float3 halfway = normalize(SpotlightDir + cameraDir);
specularLight3 = diffuseLight3 * pow(max(dot(worldNormal.xyz, halfway), 0), SpecularPower);
}
}
 
float3 DiffuseLight = AmbientColour + DiffuseLight1 + DiffuseLight2 + diffuseLight3;
float3 SpecularLight = SpecularLight1 + SpecularLight2 + specularLight3;
 
float4 DiffuseMaterial = DiffuseMap.Sample(Trilinear, offsetTexCoord);
float3 SpecularMaterial = DiffuseMaterial.a;
 
float4 combinedColour;
combinedColour.rgb = DiffuseMaterial * DiffuseLight + SpecularMaterial * SpecularLight;
combinedColour.a = 1.0f; // No alpha processing in this shader, so just set it to 1
 
return combinedColour;
}

To give the full picture, here are the other relevant pieces: the vertex input struct, the vertex shader that produces VS_LIGHTING_OUTPUT (the struct the pixel shader above takes), and the C++ code that builds the input layout.

VS_NORMALMAP_INPUT:

struct VS_NORMALMAP_INPUT
{
    float3 Pos     : POSITION;
    float3 Normal  : NORMAL;
    float3 Tangent : TANGENT;
    float2 UV      : TEXCOORD;
};

Vertex Shader:

VS_LIGHTING_OUTPUT LightingTransformTex(VS_NORMALMAP_INPUT vIn)
{
    VS_LIGHTING_OUTPUT vOut;

    // Use world matrix passed from C++ to transform the input model vertex position into world space
    float4 modelPos = float4(vIn.Pos, 1.0f); // Promote to 1x4 so we can multiply by 4x4 matrix, put 1.0 in 4th element for a point (0.0 for a vector)
    float4 worldPos = mul(modelPos, WorldMatrix);
    vOut.WorldPos = worldPos.xyz;

    // Use camera matrices to further transform the vertex from world space into view space (camera's point of view) and finally into 2D "projection" space for rendering
    float4 viewPos = mul(worldPos, ViewMatrix);
    vOut.ProjPos = mul(viewPos, ProjMatrix);

    // Transform the vertex normal from model space into world space (almost same as first lines of code above)
    float4 modelNormal = float4(vIn.Normal, 0.0f); // Set 4th element to 0.0 this time as normals are vectors
    vOut.Tangent = vIn.Tangent;
    vOut.WorldNormal = mul(modelNormal, WorldMatrix).xyz;
 
    // Pass texture coordinates (UVs) on to the pixel shader, the vertex shader doesn't need them
    vOut.UV = vIn.UV;

    return vOut;
}
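As I understand the 0.0 in the w component, it just drops the world matrix's translation row so only rotation/scale apply to the normal. My own sketch of that idea (not code from the project):

// Hypothetical helper, for illustration: with w = 0 the translation row of
// WorldMatrix contributes nothing, so this is the same as multiplying by the
// upper-left 3x3, i.e. mul(dir, (float3x3)WorldMatrix)
float3 TransformDirection(float3 dir)
{
    return mul(float4(dir, 0.0f), WorldMatrix).xyz;
}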

Input Layout:

bool CModel::Load( const string& fileName, ID3D10EffectTechnique* exampleTechnique, bool tangents /*= false*/ ) // The commented out bit is the default parameter (can't write it here, only in the declaration)
{
    // Release any existing geometry in this object
    ReleaseResources();
 
    // Use CImportXFile class (from another application) to load the given file. The import code is wrapped in the namespace 'gen'
    gen::CImportXFile mesh;
    if (mesh.ImportFile( fileName.c_str() ) != gen::kSuccess)
    {
        return false;
    }
 
    // Get first sub-mesh from loaded file
    gen::SSubMesh subMesh;
    if (mesh.GetSubMesh( 0, &subMesh, tangents ) != gen::kSuccess)
    {
        return false;
    }


    // Create vertex element list & layout. We need a vertex layout to say what data we have per vertex in this model (e.g. position, normal, uv, etc.)
    // In previous projects the element list was a manually typed in array as we knew what data we would provide. However, as we can load models with
    // different vertex data this time we need flexible code. The array is built up one element at a time: ask the import class if it loaded normals, 
    // if so then add a normal line to the array, then ask if it loaded UVS...etc
    unsigned int numElts = 0;
    unsigned int offset = 0;
    // Position is always required
    m_VertexElts[numElts].SemanticName = "POSITION";   // Semantic in HLSL (what is this data for)
    m_VertexElts[numElts].SemanticIndex = 0;           // Index to add to semantic (a count for this kind of data, when using multiple of the same type, e.g. TEXCOORD0, TEXCOORD1)
    m_VertexElts[numElts].Format = DXGI_FORMAT_R32G32B32_FLOAT; // Type of data - this one will be a float3 in the shader. Most data communicated as though it were colours
    m_VertexElts[numElts].AlignedByteOffset = offset;  // Offset of element from start of vertex data (e.g. if we have position (float3), uv (float2) then normal, the normal's offset is 5 floats = 5*4 = 20)
    m_VertexElts[numElts].InputSlot = 0;               // For when using multiple vertex buffers (e.g. instancing - an advanced topic)
    m_VertexElts[numElts].InputSlotClass = D3D10_INPUT_PER_VERTEX_DATA; // Use this value for most cases (only changed for instancing)
    m_VertexElts[numElts].InstanceDataStepRate = 0;                     // --"--
    offset += 12;
    ++numElts;
    // Repeat for each kind of vertex data
    if (subMesh.hasNormals) 
    {
        m_VertexElts[numElts].SemanticName = "NORMAL";
        m_VertexElts[numElts].SemanticIndex = 0;
        m_VertexElts[numElts].Format = DXGI_FORMAT_R32G32B32_FLOAT;
        m_VertexElts[numElts].AlignedByteOffset = offset;
        m_VertexElts[numElts].InputSlot = 0;
        m_VertexElts[numElts].InputSlotClass = D3D10_INPUT_PER_VERTEX_DATA;
        m_VertexElts[numElts].InstanceDataStepRate = 0;
        offset += 12;
        ++numElts;
    }
    if (subMesh.hasTangents)
    {
        m_VertexElts[numElts].SemanticName = "TANGENT";
        m_VertexElts[numElts].SemanticIndex = 0;
        m_VertexElts[numElts].Format = DXGI_FORMAT_R32G32B32_FLOAT;
        m_VertexElts[numElts].AlignedByteOffset = offset;
        m_VertexElts[numElts].InputSlot = 0;
        m_VertexElts[numElts].InputSlotClass = D3D10_INPUT_PER_VERTEX_DATA;
        m_VertexElts[numElts].InstanceDataStepRate = 0;
        offset += 12;
        ++numElts;
    }
    if (subMesh.hasTextureCoords)
    {
        m_VertexElts[numElts].SemanticName = "TEXCOORD";
        m_VertexElts[numElts].SemanticIndex = 0;
        m_VertexElts[numElts].Format = DXGI_FORMAT_R32G32_FLOAT;
        m_VertexElts[numElts].AlignedByteOffset = offset;
        m_VertexElts[numElts].InputSlot = 0;
        m_VertexElts[numElts].InputSlotClass = D3D10_INPUT_PER_VERTEX_DATA;
        m_VertexElts[numElts].InstanceDataStepRate = 0;
        offset += 8;
        ++numElts;
    }
    if (subMesh.hasVertexColours)
    {
        m_VertexElts[numElts].SemanticName = "COLOR";
        m_VertexElts[numElts].SemanticIndex = 0;
        m_VertexElts[numElts].Format = DXGI_FORMAT_R8G8B8A8_UNORM; // A RGBA colour with 1 byte (0-255) per component
        m_VertexElts[numElts].AlignedByteOffset = offset;
        m_VertexElts[numElts].InputSlot = 0;
        m_VertexElts[numElts].InputSlotClass = D3D10_INPUT_PER_VERTEX_DATA;
        m_VertexElts[numElts].InstanceDataStepRate = 0;
        offset += 4;
        ++numElts;
    }
    m_VertexSize = offset;

    // Given the vertex element list, pass it to DirectX to create a vertex layout. We also need to pass an example of a technique that will
    // render this model. We will only be able to render this model with techniques that have the same vertex input as the example we use here
    D3D10_PASS_DESC PassDesc;
    exampleTechnique->GetPassByIndex( 0 )->GetDesc( &PassDesc );
    Device->CreateInputLayout( m_VertexElts, numElts, PassDesc.pIAInputSignature, PassDesc.IAInputSignatureSize, &m_VertexLayout );


    // Create the vertex buffer and fill it with the loaded vertex data
    m_NumVertices = subMesh.numVertices;
    D3D10_BUFFER_DESC bufferDesc;
    bufferDesc.BindFlags = D3D10_BIND_VERTEX_BUFFER;
    bufferDesc.Usage = D3D10_USAGE_DEFAULT; // Not a dynamic buffer
    bufferDesc.ByteWidth = m_NumVertices * m_VertexSize; // Buffer size
    bufferDesc.CPUAccessFlags = 0;   // Indicates that CPU won't access this buffer at all after creation
    bufferDesc.MiscFlags = 0;
    D3D10_SUBRESOURCE_DATA initData; // Initial data
    initData.pSysMem = subMesh.vertices;   
    if (FAILED( Device->CreateBuffer( &bufferDesc, &initData, &m_VertexBuffer )))
    {
        return false;
    }
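For reference, tracing the offsets the code above would produce for a mesh with positions, normals, tangents and UVs (which is what this model should contain), I get the layout below. The byte offsets are my own working from the offset += lines, not something dumped from the program:

// POSITION : DXGI_FORMAT_R32G32B32_FLOAT, AlignedByteOffset = 0
// NORMAL   : DXGI_FORMAT_R32G32B32_FLOAT, AlignedByteOffset = 12
// TANGENT  : DXGI_FORMAT_R32G32B32_FLOAT, AlignedByteOffset = 24
// TEXCOORD : DXGI_FORMAT_R32G32_FLOAT,    AlignedByteOffset = 36
// m_VertexSize = 44 bytes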
Any advice to help diagnose this problem will be much appreciated. I apologize if I provided too much code.
