I refined it, but I'm finding there are still problems.
# Apply a small adjustment to reduce the spread of the landmarks slightly.
# Based on historical experience, a factor between 0.95 and 0.98 works well.
fine_tune_factor = 0.97  # shrink the spread by 3% (applied in the final centroid scaling below)
# Make sure we have the right number of channels (should be 68 for landmarks)
if a_ch != 68:
    print(f"Warning: Expected 68 channels for landmarks, got {a_ch}. Using fallback landmarks.")
    base_landmarks = LandmarksProcessor.landmarks_2D
    # Create fallback landmarks based on center
    fallback_pts = base_landmarks.astype(np.float32) * 100 + np.array([center[0], center[1]], dtype=np.float32)
    return fallback_pts
# Flatten each heatmap and take its argmax to get the flat index of the peak
b = a.reshape((a_ch, a_h * a_w))
c = b.argmax(1).reshape((a_ch, 1)).repeat(2, axis=1).astype(np.float32)  # force float32
# Convert the flat peak index into (x, y) heatmap coordinates
c[:, 0] %= a_w
c[:, 1] = np.floor(c[:, 1] / a_w)
for i in range(a_ch):
    pX, pY = int(c[i, 0]), int(c[i, 1])
    # Use a dynamic boundary check instead of hardcoding 63
    if 0 < pX < a_w - 1 and 0 < pY < a_h - 1:
        # Nudge the peak by a quarter pixel in the direction of the local gradient
        diff = np.array([a[i, pY, pX + 1] - a[i, pY, pX - 1],
                         a[i, pY + 1, pX] - a[i, pY - 1, pX]], dtype=np.float32)  # force float32
        c[i] += np.sign(diff) * 0.25
c += 0.5  # move from the top-left corner of each peak pixel to its center
# Map the heatmap-space peaks back into image coordinates via self.transform
pts = np.array([self.transform(c[i], center, scale, a_w) for i in range(a_ch)], dtype=np.float32)  # ensure float32 output
# Shrink the landmarks slightly around their centroid to compensate for bias in the model's output.
# Empirically, the FAN model tends to produce slightly oversized landmark sets.
offset_factor = 0.95  # reduce distances from the centroid by 5%
# Calculate the centroid of the landmarks
centroid = np.mean(pts, axis=0)
# Scale distances from the centroid by both correction factors (0.95 * 0.97)
pts = centroid + (pts - centroid) * offset_factor * fine_tune_factor
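
For reference, the snippet assumes a self.transform(point, center, scale, resolution) helper that maps a peak from heatmap space back into original image coordinates. Below is a minimal sketch of what such a FAN-style helper typically looks like; the 200-pixel reference box height and the exact matrix layout are assumptions, not taken from the code above.

import numpy as np

def transform(point, center, scale, resolution):
    # Build the matrix that maps image space -> heatmap space for a crop
    # defined by (center, scale), then invert it to go back to image space.
    pt = np.array([point[0], point[1], 1.0], dtype=np.float32)
    h = 200.0 * scale                              # assumed reference box size
    m = np.eye(3, dtype=np.float32)
    m[0, 0] = resolution / h
    m[1, 1] = resolution / h
    m[0, 2] = resolution * (-center[0] / h + 0.5)
    m[1, 2] = resolution * (-center[1] / h + 0.5)
    m = np.linalg.inv(m)
    return np.matmul(m, pt)[0:2]

With a helper like this, the refined peaks in c (heatmap coordinates in the 0..a_w range) come out as 68 (x, y) points in the coordinate system of the original image, which is what the centroid scaling above operates on.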