So I came up with the following implementation embedded directly in distance constraints projection (https://matthias-research.github.io/pag ... s/XPBD.pdf, equation 26) but there is something not quite right in its behavior. Would you be so kind to have a quick look. Maybe you would spot an error in code.
Code: Select all
float dtSq = dt * dt;
float alphaHat = compliance / dtSq;
float betaHat = damping * dtSq;
float gamma = (alphaHat * betaHat) / dt;
for (int i = 0; i < cnstrsCount; i++)
{
DistanceConstraint cnstr = cnstrs[i];
//positions at start of the substep
float4 posA = positions[cnstr.idA];
float4 posB = positions[cnstr.idB];
float4 predPosA = predictedPositions[cnstr.idA];
float4 predPosB = predictedPositions[cnstr.idB];
float wA = massesInv[cnstr.idA];
float wB = massesInv[cnstr.idB];
float wSum = wA + wB;
if (cnstr.restLength == 0 || wSum == 0)
continue;
float4 N= predPosB - predPosA;
float len = math.length(N);
if (len <= float.Epsilon)
continue;
float C = len - cnstr.restLength;
N = N/ len;
float4 gradA = -N;
float4 gradB = N;
float4 velA = predPosA - posA;
float4 velB = predPosB - posB;
float numA = C + (alphaHat * lambdasA[i]) + (gamma * (math.dot(gradA, velA)));
float denA = ((1.0f + gamma) * wSum) + alphaHat;
float lambdaA = -numA / denA;
float numB = C + (alphaHat * lambdasB[i]) + (gamma * (math.dot(gradB, velB)));
float denB = ((1.0f + gamma) * wSum) + alphaHat;
float lambdaB = -numB / denB;
//these lambda arrays are zeroed at start of each substep
lambdasA[i] += lambdaA;
lambdasB[i] += lambdaB;
predictedPositions[cnstr.idA] += lambdaA * gradA * wA;
predictedPositions[cnstr.idB] += lambdaB * gradB * wB;
}