opengl – Trouble generating a depth map (black screen)

I am having trouble generating a depth map for my scene. I tried to figure this issue out two weeks ago and got nowhere after days of attempts, so I took a break and tackled it again today. I’m still stuck and out of ideas.

Here’s the main code:

// Sets up the depth FBO and the texture that will be used to render to a quad
const unsigned int d_width = 1024, d_height = 1024;

    unsigned int depthFBO;
    glGenFramebuffers(1, &depthFBO);
    glBindBuffer(GL_FRAMEBUFFER, depthFBO);

    unsigned int depthMap;
    glGenTextures(1, &depthMap);
    glBindTexture(GL_TEXTURE_2D, depthMap);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16, d_width, d_height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);

    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthMap, 0);
    glDrawBuffer(GL_NONE);
    glReadBuffer(GL_NONE);

    if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
        std::cout << "Incomplete stuff!" << std::endl;

    glBindBuffer(GL_FRAMEBUFFER, 0);

    glm::vec3 lightPos(-2.0f, 4.0f, -1.0f);
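
For comparison, here is a minimal depth-FBO setup as it appears in common shadow-mapping tutorials (a sketch, not the code above; note that framebuffer objects are bound with glBindFramebuffer, since GL_FRAMEBUFFER is not a valid target for glBindBuffer and such a call only raises GL_INVALID_ENUM):

    unsigned int fbo, tex;
    glGenFramebuffers(1, &fbo);
    glBindFramebuffer(GL_FRAMEBUFFER, fbo);   // note: glBindFramebuffer, not glBindBuffer

    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    // internal format GL_DEPTH_COMPONENT16, pixel format GL_DEPTH_COMPONENT
    glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16, 1024, 1024, 0,
                 GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);

    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, tex, 0);
    glDrawBuffer(GL_NONE);   // depth-only pass: no color buffer is drawn or read
    glReadBuffer(GL_NONE);
    glBindFramebuffer(GL_FRAMEBUFFER, 0);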

Rendering loop:

/* Render here */
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        glClearColor(0.1f, 0.1f, 0.1f, 1.0f);

        // render the scene from the light's perspective
        float near_plane = 1.0f, far_plane = 8.0f;
        glm::mat4 lightProj = glm::ortho(-10.0f, 10.0f, 10.0f, 10.0f, near_plane, far_plane);
        glm::mat4 lightView = glm::lookAt(lightPos, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f));
        glm::mat4 lightSpaceMat = lightProj * lightView;

        depthS.use();
        depthS.setUniformMat4("lightSpaceMat", lightSpaceMat);
        glm::mat4 model = glm::mat4(1.0f);
        depthS.setUniformMat4("model", model);

        glViewport(0, 0, d_width, d_height);
        glBindFramebuffer(GL_FRAMEBUFFER, depthFBO);
        glClear(GL_DEPTH_BUFFER_BIT);
        
        // render everything else
        planeVAO.bind();
        model = glm::translate(model, glm::vec3(0.0f, -1.0f, 0.0f));
        depthS.setUniformMat4("model", model);

        glDrawArrays(GL_TRIANGLES, 0, 6);

        cubeVAO.bind();
        model = glm::translate(model, glm::vec3(1.0f, 1.0f, -2.0f));
        model = glm::scale(model, glm::vec3(0.6f));
        depthS.setUniformMat4("model", model);
        glDrawArrays(GL_TRIANGLES, 0, 36);

        model = glm::translate(model, glm::vec3(2.0f, 1.0f, -1.0f));
        depthS.setUniformMat4("model", model);
        glDrawArrays(GL_TRIANGLES, 0, 36);

        // render to the quad, which should display the depth map
        glBindFramebuffer(GL_FRAMEBUFFER, 0);
        glViewport(0, 0, scr_width, scr_height);
        glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
        
        quadVAO.bind();
        quadShader.use();
        glActiveTexture(GL_TEXTURE15);
        glBindTexture(GL_TEXTURE_2D, depthMap);
        glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

Here are the shaders:

depth shaders:

// vertex shader
#version 330 core

layout (location = 0) in vec3 aPos;

uniform mat4 lightSpaceMat;
uniform mat4 model;

void main()
{
    gl_Position = lightSpaceMat * model * vec4(aPos, 1.0);
}

// fragment shader
#version 330 core

void main()
{
    gl_FragDepth = gl_FragCoord.z;
}

visual quad shaders:

// vertex shader
#version 330 core

layout (location = 0) in vec3 aPos;
layout (location = 1) in vec2 aTex;

out vec2 TexCoords;

void main()
{
    TexCoords = aTex;
    gl_Position = vec4(aPos, 1.0);
}

// fragment shader
#version 330 core

out vec4 FragColor;

in vec2 TexCoords;

uniform sampler2D depthMap;

void main()
{
    float depthValue = texture(depthMap, TexCoords).r;
    FragColor = vec4(vec3(depthValue), 1.0);
}
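
As an aside: with the orthographic light projection used here, the stored depth is already linear, so displaying the raw value is fine. Only a depth map rendered with a perspective projection would need linearizing before display, along these lines (a standard helper; near/far are the projection's clip planes):

    // Only needed for a *perspective* depth map; orthographic depth is already linear.
    float LinearizeDepth(float depth, float near, float far)
    {
        float z = depth * 2.0 - 1.0; // back to NDC
        return (2.0 * near * far) / (far + near - z * (far - near));
    }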

Anyone have any idea what I might be doing wrong? I have tried debugging in every way I could think of and have made no progress.

Edit: full source.cpp code:

#include <fstream>
#include <sstream>

#include <iostream>
#include <string>
#include <vector>
#include <glm.hpp>
#include <gtc/matrix_transform.hpp>
#include <gtc/type_ptr.hpp>

#include "Shader.h" 
#include "Textures.h"
#include "Camera.h"
#include "Context.h"
#include "Renderer.h"
#include "DataDefs.h"
#include "StaticDataDefs.h"
#include "VertexBuffer.h"
#include "Model.h"

#include <GL/glew.h>
#include <GLFW/glfw3.h>

#include <tuple>
#include <array>

#include "imgui/imgui.h"
#include "imgui/imgui_impl_glfw.h"
#include "imgui/imgui_impl_opengl3.h"
#include <stdio.h>
#include <map>

#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"

float planeVertices[] = {
    // positions            // normals         // texcoords
     25.0f, -0.5f,  25.0f,  0.0f, 1.0f, 0.0f,  25.0f,  0.0f,
    -25.0f, -0.5f,  25.0f,  0.0f, 1.0f, 0.0f,   0.0f,  0.0f,
    -25.0f, -0.5f, -25.0f,  0.0f, 1.0f, 0.0f,   0.0f, 25.0f,

     25.0f, -0.5f,  25.0f,  0.0f, 1.0f, 0.0f,  25.0f,  0.0f,
    -25.0f, -0.5f, -25.0f,  0.0f, 1.0f, 0.0f,   0.0f, 25.0f,
     25.0f, -0.5f, -25.0f,  0.0f, 1.0f, 0.0f,  25.0f, 10.0f
};

const unsigned int scr_width = 800;
const unsigned int scr_height = 600;

float quadVertices[] = {
    // positions        // texture Coords
    -1.0f,  1.0f, 0.0f, 0.0f, 1.0f,
    -1.0f, -1.0f, 0.0f, 0.0f, 0.0f,
     1.0f,  1.0f, 0.0f, 1.0f, 1.0f,
     1.0f, -1.0f, 0.0f, 1.0f, 0.0f,
};

int main(void)
{
    //Setup all openGL stuff
    setProject init;
    GLFWwindow* window = init.returnWindow();

    Camera camera; 
    camera.setInputs(window, camera);

    glEnable(GL_DEPTH_TEST);

    Shader normal;
    normal.setUpShader("Resources/Shaders/lightObject.vs", "Resources/Shaders/lightObject.fs");
    Shader depthS;
    depthS.setUpShader("Resources/Shaders/depthS.vs", "Resources/Shaders/depthS.fs");
    Shader depthR;
    depthR.setUpShader("Resources/Shaders/depthR.vs", "Resources/Shaders/depthR.fs");

    Context va;
    VertexBuffer vb(vert3d.size() * sizeof(Def3), vert3d.data(), 's', "type3a", GL_FALSE, GL_FLOAT);
    va.addBuffer(vb, "VBO");
    GLCall(va.setVBOAttribs());

    Context va2;
    VertexBuffer vb2(sizeof(quadVertices), &quadVertices[0], 's', "type2a", GL_FALSE, GL_FLOAT);
    va2.addBuffer(vb2, "VBO");
    GLCall(va2.setVBOAttribs());
    
    Context plane;
    VertexBuffer planevb(sizeof(planeVertices), &planeVertices[0], 's', "type3a", GL_FALSE, GL_FLOAT);
    plane.addBuffer(planevb, "VBO");
    GLCall(plane.setVBOAttribs());
   
    depthR.use();
    depthR.setInt("depthMap", 15);

    const unsigned int d_width = 1024, d_height = 1024;

    unsigned int depthFBO;
    glGenFramebuffers(1, &depthFBO);
    glBindBuffer(GL_FRAMEBUFFER, depthFBO);

    unsigned int depthMap;
    glGenTextures(1, &depthMap);
    glActiveTexture(GL_TEXTURE15);
    glBindTexture(GL_TEXTURE_2D, depthMap);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16, d_width, d_height, 0, GL_DEPTH_ATTACHMENT, GL_FLOAT, NULL);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);

    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthMap, 0);
    glReadBuffer(GL_NONE);
    glDrawBuffer(GL_NONE);

    if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
        std::cout << "Incomplete stuff!" << std::endl;

    glBindBuffer(GL_FRAMEBUFFER, 0);

    glm::vec3 lightPos(2.0f, 4.0f, 1.0f);

    /* Loop until the user closes the window */
    while (!glfwWindowShouldClose(window))
    {
     
        /* Render here */
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        glClearColor(0.1f, 0.1f, 0.1f, 1.0f);

        texo.textureBinder(0);

        //set up scene, draw objects, etc.

        camera.processInput(window);
        camera.calcDelta();

        float fov = camera.return_fov();

        float near_plane = 1.0f, far_plane = 8.0f;
        glm::mat4 lightProj = glm::ortho(-10.0f, 10.0f, 10.0f, 10.0f, near_plane, far_plane);
        glm::mat4 lightView = glm::lookAt(lightPos, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f));
        glm::mat4 lightSpaceMat = lightProj * lightView;

        depthS.use();
        depthS.setUniformMat4("lightSpaceMat", lightSpaceMat);
        glm::mat4 model = glm::mat4(1.0f);
        depthS.setUniformMat4("model", model);
      
        glViewport(0, 0, d_width, d_height);
        glBindFramebuffer(GL_FRAMEBUFFER, depthFBO);
        glClear(GL_DEPTH_BUFFER_BIT);

        depthS.use();

        plane.bind();
        model = glm::translate(model, glm::vec3(0.0f, -1.0f, 0.0f));
        depthS.setUniformMat4("model", model);

        glDrawArrays(GL_TRIANGLES, 0, 6);

        va.bind();
        model = glm::translate(model, glm::vec3(1.0f, 1.0f, -2.0f));
        model = glm::scale(model, glm::vec3(0.6f));
        depthS.setUniformMat4("model", model);
        glDrawArrays(GL_TRIANGLES, 0, 36);

        model = glm::translate(model, glm::vec3(2.0f, 1.0f, -1.0f));
        depthS.setUniformMat4("model", model);
        glDrawArrays(GL_TRIANGLES, 0, 36);

        glBindFramebuffer(GL_FRAMEBUFFER, 0);
        glViewport(0, 0, scr_width, scr_height);
        glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
        
        va2.bind();
        depthR.use();
        glActiveTexture(GL_TEXTURE15);
        glBindTexture(GL_TEXTURE_2D, depthMap);
        glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

        /* Swap front and back buffers */
        glfwSwapBuffers(window);
        
        /* Poll for and process events */
        glfwPollEvents();
    }

    glfwTerminate();
    return 0;
}

(the “va” VAO holds the data for the cubes and “va2” for the quad)

https://github.com/julkothegu/p_1_test
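
A general debugging aid for a black-screen case like this (a sketch; it requires a GL 4.3+ context or the KHR_debug extension) is to enable OpenGL debug output, so invalid calls are reported the moment they happen:

    // Reports every GL error/warning as it occurs (GL 4.3+ / KHR_debug); needs <iostream>.
    static void GLAPIENTRY debugCallback(GLenum source, GLenum type, GLuint id,
                                         GLenum severity, GLsizei length,
                                         const GLchar* message, const void* userParam)
    {
        std::cerr << "GL debug: " << message << std::endl;
    }

    // After context creation:
    glEnable(GL_DEBUG_OUTPUT);
    glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
    glDebugMessageCallback(debugCallback, nullptr);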


unity – How to write a depth texture and read values from it

I am new to Unity and specialize in another field, but now I have to learn it quickly for a new project. I will be very thankful if anyone can explain how to read values from the depth buffer.
I wrote a shader which creates a depth image:

Shader "Custom/MyDepthShader"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
        
    }
    SubShader
    {
        // No culling or depth
        Cull Off ZWrite Off ZTest Always

        Pass
        {
            CGPROGRAM
            #pragma vertex vert // compile function vert as vertex shader
            #pragma fragment frag // compile function frag as fragment shader

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                o.uv = v.uv;
                return o;
            }

            sampler2D _MainTex;
            sampler2D _CameraDepthTexture;

            fixed4 frag (v2f i) : SV_Target
            {
                float depth = tex2D(_CameraDepthTexture, i.uv).r;
                depth = Linear01Depth(depth);
                depth = depth * _ProjectionParams.z;
                return depth;
            }
            ENDCG
        }
    }
}

Now I need to get the depth information into a texture and save the float values of that texture to, for example, a txt file for further processing.
I haven’t found an answer to this question after two days of googling and reading guides.
For testing this shader I am using OnRenderImage(RenderTexture src, RenderTexture dst) with the material.
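
For the saving step, one common pattern (a sketch only; the class and field names are illustrative, and the material is assumed to use the shader above) is to blit into a single-channel float RenderTexture, read it back with ReadPixels, and dump the red channel to a file:

    using System.IO;
    using System.Text;
    using UnityEngine;

    public class DepthDumper : MonoBehaviour
    {
        public Material depthMaterial;   // material using the depth shader above
        public bool dump;                // set from the Inspector or a key press

        void OnRenderImage(RenderTexture src, RenderTexture dst)
        {
            Graphics.Blit(src, dst, depthMaterial);
            if (!dump) return;           // only read back when requested
            dump = false;

            // Render the depth into a float texture and read it back to the CPU.
            var rt = RenderTexture.GetTemporary(src.width, src.height, 0,
                                                RenderTextureFormat.RFloat);
            Graphics.Blit(src, rt, depthMaterial);

            var tex = new Texture2D(rt.width, rt.height, TextureFormat.RFloat, false);
            RenderTexture.active = rt;
            tex.ReadPixels(new Rect(0, 0, rt.width, rt.height), 0, 0);
            tex.Apply();
            RenderTexture.active = null;
            RenderTexture.ReleaseTemporary(rt);

            var sb = new StringBuilder();
            foreach (var p in tex.GetPixels())
                sb.AppendLine(p.r.ToString());   // linear depth in view units
            File.WriteAllText("depth.txt", sb.ToString());
        }
    }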

bootstrap – Eliminating the “Maximum update depth exceeded” error

I am using react-bootstrap-table2 with filters on some columns. I need to get the filtered data, so from the BootstrapTable render I use filter:

<BootstrapTable
   {...props.baseProps}
   filter={filterFactory({ afterFilter })}

/>

The filter function that calls setState:

const [filteredData, setFilteredData] = React.useState(getData);

function afterFilter(newResult, newFilters) {
    setFilteredData(newResult);
}

When I run it and filter, I get the following error:

Maximum update depth exceeded. This can happen when a component repeatedly calls setState inside componentWillUpdate or componentDidUpdate. React limits the number of nested updates to prevent infinite loops.

How can I fix this?
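
For reference, one common way out of this loop (a sketch; the equality check is illustrative) is to bail out of the state update when the filter result has not actually changed, since afterFilter fires again on every re-render:

const [filteredData, setFilteredData] = React.useState(getData);

function afterFilter(newResult, newFilters) {
    // Unconditionally setting state re-renders the table, which re-runs the
    // filter and calls afterFilter again, looping forever. Returning the
    // previous state reference makes React skip the re-render.
    setFilteredData(prev =>
        prev.length === newResult.length ? prev : newResult
    );
}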

Rotation of a point in C, depth is messed up

I created a keyboard shape from multiple quads. I would like to apply a pitch rotation so that it looks like the first picture.
It is a rotation with respect to a point: the origin of the view.
My problem is that I get the second picture where you can see that the keys are no longer at the correct depth.

(two screenshots: the desired keyboard orientation, and the broken result with the keys at the wrong depth)

Here is the code I use; the renderer is based on idTech3, so the axis convention is:
X axis = Left/Right, Y axis = Forward/Backward, Z axis = Up/Down

void rotatePITCH( vec3_t in_out, vec3_t center, float sinAngle, float cosAngle )
{
    float x1 = in_out[2] - center[2];
    float y1 = in_out[1] - center[1]; //*

    float x2 = x1 * cosAngle - y1 * sinAngle;
    float y2 = x1 * sinAngle + y1 * cosAngle;

    in_out[2] = x2 + center[2];
    in_out[1] = y2 + center[1]; //*
}

More info:

In order to produce the correct first screenshot, I removed the “- center[1]” and “+ center[1]” from the two lines marked with an asterisk. Unfortunately, with this modification the whole keyboard moves far away when the height of the view changes.

I use a rotation relative to a point mainly because I did not manage to make a simple rotation work: when I remove the four lines where the “center” point is subtracted and added, the keyboard is no longer in the view. I checked the coordinates, and the vertical one, center[2], is far below the view. I don’t understand why, but the problem is surely the same.

Another piece of info: I have the same function for the YAW axis, and it works perfectly (code from https://stackoverflow.com/a/22491252/3520059).

Any help appreciated.

Edit:

The center point is the player origin at ground level, so the view height is added:

VectorCopy(cent->lerpOrigin, origin);
origin[2] += cg.viewHeight;

Then, I move it in front of the player with:

VectorMA( origin, 70.0f, viewAxis[0], origin );

VectorMA() is idTech3 specific:

// move point v by distance s in the b axis, result is point o
#define VectorMA(v, s, b, o)    ((o)[0]=(v)[0]+(b)[0]*(s),(o)[1]=(v)[1]+(b)[1]*(s),(o)[2]=(v)[2]+(b)[2]*(s))
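
For comparison, the rotation math above matches the standard translate-rotate-translate form for rotating a point about a pivot in the (forward, up) = (y, z) plane; written out as a sketch (assuming vec3_t is float[3] as in idTech3):

/* Standard 2D rotation of point p about pivot c in the (y, z) plane,
   same math as rotatePITCH above: s = sin(angle), co = cos(angle). */
void rotate_pitch(vec3_t p, const vec3_t c, float s, float co)
{
    float z = p[2] - c[2];            /* translate so the pivot is the origin */
    float y = p[1] - c[1];
    p[2] = c[2] + z * co - y * s;     /* rotate, then translate back */
    p[1] = c[1] + z * s + y * co;
}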

depth of field – Can you shoot with a 55mm lens so that you have both person and background clear?

I assume that by “clear”, you mean sharp.

The answer depends on what distance you consider “close” and how sharp is “clear” enough for you. You could use an online depth of field calculator to check what’s feasible.

For example, using f/22 and focusing at hyperfocal distance (23 feet) on a Canon 1.6 crop body, your depth of field starts at about 12 feet, so this is the closest distance you can have sharp with this lens and aperture.
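Those numbers come from the usual hyperfocal approximation, H ≈ f² / (N·c), with a circle of confusion c of roughly 0.019 mm for a 1.6-crop sensor: H ≈ (55 mm)² / (22 × 0.019 mm) ≈ 7.2 m ≈ 23 ft, and the near limit of the depth of field when focused at H is about H/2 ≈ 12 ft.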

If you want to avoid loss of overall sharpness due to diffraction, you should open the aperture to about f/11 and that leaves 23 feet as the closest distance (focusing at hyperfocal distance – 46 feet).

Of course, those are not hard numbers – transition from sharp to non-sharp is gradual, and you might be able to position your front subject closer or be forced to move further depending on how much blur you are willing to accept.

python – Testing Depth First Search Using Pytest

I have an implementation of depth first search, which I want to test using the pytest framework.

I would be making test cases for other graph algorithms, and would like to know if this approach is good enough to be replicated on similar programs.

Here is what it looks like currently:
(This may not be a very exhaustive set of test cases, but my goal is to separate simple checks that are done in the actual implementation)

import pytest
from dfs import dfs_recursive, dfs_iterative


def test_data():
    test_graph = {
        'A' : ('B','S'),
        'B' : ('A'),
        'C' : ('D','E','F','S'),
        'D' : ('C'),
        'E' : ('C','H'),
        'F' : ('C','G'),
        'G' : ('F','S'),
        'H' : ('E','G'),
        'S' : ('A','C','G')
    }

    assert dfs_iterative(test_graph, 'A') is not None
    assert len(dfs_iterative(test_graph, 'A')) == len(list(test_graph.keys()))
    assert dfs_recursive(test_graph, 'A') is not None
    assert len(dfs_recursive(test_graph, 'A')) == len(list(test_graph.keys()))


def test_graph():
    test_graph = {
        1: (2, 3)
    }

    if len(test_graph.keys()) < 2:
        print("\nA graph has to have at least 2 vertices")


def all_unique(x):
    """
    Check if all elements in a list are unique; if not, exit early
    """
    seen = set()
    return not any(i in seen or seen.add(i) for i in x)

def test_unique():
    test_graph = {
        'A': ('B', 'C'),
        'B': ('A', 'D', 'E'),
        'C': ('A', 'F'),
        'D': ('B'),
        'E': ('B', 'F'),
        'F': ('C', 'E')
    }

    assert all_unique(dfs_iterative(test_graph, 'A')) is True
    assert all_unique(dfs_recursive(test_graph, 'A')) is True


def test_vertex1():
    test_graph = {
        'A': ('B', 'C'),
        'B': ('A', 'D', 'E'),
        'C': ('A', 'F'),
        'D': ('B'),
        'E': ('B', 'F'),
        'F': ('C', 'E')
    }

    with pytest.raises(KeyError) as excinfo:
        dfs_iterative(test_graph, 'Z')

    assert 'Z' in str(excinfo.value)
    assert 'KeyError' in str(excinfo.type)
    print('\nVertex does not exist')


def test_vertex2():
    test_graph = {
        1: (2, 3),
        2: (2, 3, 4),
        3: (),
        4: (),
    }

    for key, value in test_graph.items():
        if key in value:
            pytest.fail("A vertex cannot point to itself")    # explicitly fails this test

Please suggest any changes, additions or improvements.
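
For context, here is a minimal dfs.py these tests could run against (an assumption — the question doesn't show the implementations, so this is only an illustrative sketch):

# dfs.py -- illustrative implementations only; the actual module under
# test isn't shown in the question.

def dfs_iterative(graph, start):
    """Return vertices in DFS order using an explicit stack.

    Looking up graph[vertex] raises KeyError for unknown vertices,
    which is what test_vertex1 expects.
    """
    visited, stack = [], [start]
    while stack:
        vertex = stack.pop()
        if vertex not in visited:
            visited.append(vertex)
            stack.extend(n for n in graph[vertex] if n not in visited)
    return visited


def dfs_recursive(graph, start, visited=None):
    """Return vertices in DFS order using recursion."""
    if visited is None:
        visited = []
    visited.append(start)
    for neighbour in graph[start]:
        if neighbour not in visited:
            dfs_recursive(graph, neighbour, visited)
    return visited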

depth of field – How can I maximize the “blurry background, sharp subject” (bokeh) effect?

Here’s how to make the background as blurry as possible while keeping the subject sharp.

It’s the contrast between a sharp subject and a very blurry background that makes this effect stand out. Simply setting a wide aperture and getting a shallow depth of field is not how you get this effect, because then the subject may not be fully in focus. Background blur depends not just on the aperture setting, but also on the positioning of the camera, subject, and background, and on the focal length of the lens.

First, decide how large the subject should appear within the frame. That’s the magnification (relative to the frame size, ignoring differences between formats). The magnification is an important aspect of the composition that will probably override all other considerations for sharpness and blurriness, so this procedure assumes the relative magnification will be decided first and held constant.

Next, find the largest aperture (smallest f-number) that keeps the subject entirely in sharp focus. That means the focus field must be just deep enough to include the subject front-to-back, with nothing in front of or behind the subject in sharp focus. Focus must also be set accurately to include the entire subject.

Note that the subject appearing sharp on a screen (or in a print) depends on how it is viewed. If you are viewing the image on a screen at reduced resolution or from a far distance, more of the image will appear sharp (that is, the focus field will be deeper). So try to emulate the final viewing conditions as best you can. If you want the image to appear sharp according to the limit of your camera’s resolution, use your camera’s focus magnification feature to zoom in all the way as you are adjusting the aperture and focus.

For a given magnification and format, the focus field’s depth depends largely on the f-number, and is largely independent of the distance between the camera and the subject, and the focal length. So with the aperture now set, it shouldn’t require much adjustment from this point on. (Though with the subject very close to the camera and with a very wide lens, the focus field is deeper for a given f-number and format.)

With the magnification and the f-number now set and held constant, background blur is maximized by maximizing the camera-to-subject distance and the subject-to-background distance. Longer focal length lenses let you move the camera further from the subject and increase the background blur while maintaining the desired magnification.
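
Quantitatively: for a background far behind the subject (effectively at infinity), the diameter of the background blur disc on the sensor is approximately the physical aperture diameter times the subject magnification, b ≈ m · f/N. So at fixed magnification m and f-number N, doubling the focal length doubles the background blur, which is why backing away with a longer lens works.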

Shooting indoors, where the camera-to-background distance is constrained, background blur is maximized by placing the camera as far as possible from the background, and placing the subject halfway in between. If your longest lens doesn’t give you enough magnification, move the subject closer until you get the magnification you want.

Shooting outdoors where the distance to the background is large, use your longest lens and back the camera far enough away from the subject to achieve the desired magnification. A more distant background will appear blurrier, but the effect increases more slowly as the distance approaches infinity, so don’t worry about trying to make the subject-to-background distance really large.

Note that moving the camera back also changes the perspective, making background objects appear larger relative to the subject.

A note about formats and lenses: If you are shooting with high magnification, the focus field will be shallow even at moderate f-numbers, and so having a fast lens is not important. In fact you might not be able to get the entire subject in focus even at the lens’s smallest aperture setting. Smaller format cameras have lenses with smaller apertures, which can overcome this problem. If you are shooting with low magnification (the subject is far away or the angle of view is wide), the focus field can be deeper than you want even with a fast lens. Larger format cameras can overcome this problem by having lenses with larger apertures.

Show that any sorting network on n inputs has depth at least log n

Hi, I’m just starting to look at parallel algorithms and I’m not sure how to approach this. Any help would be great.

lens – Is the sharpness of imaging different within the depth of field?

The clearest image of an object is always when the object is in focus, at the plane of focus. That is, for a given lens of focal length ƒ positioned a distance v from the camera sensor, an object at distance u from the lens is in focus in accordance with the thin-lens approximation 1/ƒ = 1/u + 1/v.
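
For example, with ƒ = 50 mm and an object at u = 2 m, the lens must sit at v = uƒ/(u − ƒ) = (2000 × 50)/(2000 − 50) ≈ 51.3 mm from the sensor for that object to be rendered sharpest; anything nearer or farther than u is progressively blurred.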