unity – Passing touch position to compute shader

I am working with a plugin to get this effect: Effect

Problem:

The shaders are able to create the effect I wanted but the effect is created in the wrong position.

Implementation:

I have one compute shader which takes 2 parameters namely force origin and force direction. The force direction is working properly as the deformation happens in the direction of the finger swipe. I am passing the force origin value to the shader using “RaycastHit.textureCoord”. If I tap at the bottom left corner on the image, the effect appears at the center of the image. Below is the compute shader code:

// StableFluids - A GPU implementation of Jos Stam's Stable Fluids on Unity
// https://github.com/keijiro/StableFluids
 
#pragma kernel Advect
#pragma kernel Force
#pragma kernel PSetup
#pragma kernel PFinish
#pragma kernel Jacobi1
#pragma kernel Jacobi2
 
// Common parameter
float Time;
float DeltaTime;
 
// External force
float2 ForceOrigin;
float2 ForceVector;
float ForceExponent;
 
// U (velocity field)
Texture2D<float2> U_in;
SamplerState samplerU_in;
RWTexture2D<float2> U_out;
 
// W (velocity field; working)
Texture2D<float2> W_in;
RWTexture2D<float2> W_out;
 
// Div W
RWTexture2D<float> DivW_out;
 
// P (pressure field)
Texture2D<float> P_in;
RWTexture2D<float> P_out;
 
// Color map
Texture2D<half4> C_in;
SamplerState samplerC_in;
RWTexture2D<half4> C_out;
 
// Jacobi method arguments
float Alpha, Beta;
 
Texture2D<float> X1_in;
Texture2D<float> B1_in;
RWTexture2D<float> X1_out;
 
Texture2D<float2> X2_in;
Texture2D<float2> B2_in;
RWTexture2D<float2> X2_out;
 
// Advect step: semi-Lagrangian advection of the velocity field.
// NOTE(review): the attribute and texture-indexing brackets were mangled to
// parentheses by extraction; restored to valid HLSL ([numthreads], U_in[tid]).
[numthreads(8, 8, 1)]
void Advect(uint2 tid : SV_DispatchThreadID)
{
    uint2 dim;
    W_out.GetDimensions(dim.x, dim.y);

    // Normalized UV at this texel's center.
    float2 uv = (tid + 0.5) / dim;
    // Back-trace offset; x is scaled by dim.y/dim.x so the step is isotropic
    // on non-square textures.
    float2 duv = U_in[tid] * float2((float)dim.y / dim.x, 1) * DeltaTime;

    // Sample the velocity upstream of this texel.
    W_out[tid] = U_in.SampleLevel(samplerU_in, uv - duv, 0);
}
 
// Add-force step: inject ForceVector around ForceOrigin with exponential falloff.
// NOTE(review): brackets restored from extraction mangling ([numthreads], W_out[tid]).
[numthreads(8, 8, 1)]
void Force(uint2 tid : SV_DispatchThreadID)
{
    uint2 dim;
    W_out.GetDimensions(dim.x, dim.y);

    // Aspect-corrected position centered on the texture: y spans [-0.5, +0.5],
    // x spans [-0.5*aspect, +0.5*aspect]. ForceOrigin must be supplied in this
    // same space — a raw RaycastHit.textureCoord in [0,1] must be remapped
    // (roughly (uv - 0.5) with x scaled by the aspect ratio) before upload,
    // which matches the asker's "tap bottom-left, effect at center" symptom.
    float2 pos = (tid + 0.5 - dim * 0.5) / dim.y;
    float amp = exp(-ForceExponent * distance(ForceOrigin, pos));

    W_out[tid] = W_in[tid] + ForceVector * amp;
}
 
// Setup for Project step (divW calculation).
// NOTE(review): brackets restored from extraction mangling.
[numthreads(8, 8, 1)]
void PSetup(uint2 tid : SV_DispatchThreadID)
{
    uint2 dim;
    W_in.GetDimensions(dim.x, dim.y);

    // Divergence of W via central differences, scaled by dim.y / 2.
    DivW_out[tid] = (W_in[tid + int2(1, 0)].x - W_in[tid - int2(1, 0)].x +
                     W_in[tid + int2(0, 1)].y - W_in[tid - int2(0, 1)].y) * dim.y / 2;

    // Zero initial guess for the pressure Jacobi iteration.
    P_out[tid] = 0;
}
 
// Finishing for Project step (divergence free field calculation).
// NOTE(review): brackets restored from extraction mangling.
[numthreads(8, 8, 1)]
void PFinish(uint2 tid : SV_DispatchThreadID)
{
    uint2 dim;
    W_in.GetDimensions(dim.x, dim.y);

    // Skip the outermost texels; they are written as boundary mirrors below.
    if (any(tid == 0) || any(tid == dim - 1)) return;

    // Neighboring pressure samples, clamped to the interior.
    float P1 = P_in[max(tid - int2(1, 0), 1)];
    float P2 = P_in[min(tid + int2(1, 0), dim - 2)];
    float P3 = P_in[max(tid - int2(0, 1), 1)];
    float P4 = P_in[min(tid + int2(0, 1), dim - 2)];

    // Subtract the pressure gradient to obtain a divergence-free velocity.
    float2 u = W_in[tid] - float2(P2 - P1, P4 - P3) * dim.y / 2;

    U_out[tid] = u;

    // Boundary condition: write the negated velocity into the edge texels.
    if (tid.x == 1) U_out[int2(0, tid.y)] = -u;
    if (tid.y == 1) U_out[int2(tid.x, 0)] = -u;
    if (tid.x == dim.x - 2) U_out[int2(dim.x - 1, tid.y)] = -u;
    if (tid.y == dim.y - 2) U_out[int2(tid.x, dim.y - 1)] = -u;
}
 
// Jacobi method with a scalar field.
// One relaxation step: four-neighbor sum plus the Alpha-weighted source term,
// normalized by Beta. (Brackets restored from extraction mangling.)
[numthreads(8, 8, 1)]
void Jacobi1(uint2 tid : SV_DispatchThreadID)
{
    X1_out[tid] = (X1_in[tid - int2(1, 0)] + X1_in[tid + int2(1, 0)] +
                   X1_in[tid - int2(0, 1)] + X1_in[tid + int2(0, 1)] + Alpha * B1_in[tid]) / Beta;
}
 
// Jacobi method with a vector field.
// Same relaxation step as Jacobi1, applied component-wise to float2 fields.
// (Brackets restored from extraction mangling.)
[numthreads(8, 8, 1)]
void Jacobi2(uint2 tid : SV_DispatchThreadID)
{
    X2_out[tid] = (X2_in[tid - int2(1, 0)] + X2_in[tid + int2(1, 0)] +
                   X2_in[tid - int2(0, 1)] + X2_in[tid + int2(0, 1)] + Alpha * B2_in[tid]) / Beta;
}

What kind of conversion is required to the touch input position so that the effect appears where the finger is touched?

I would appreciate any suggestions and thoughts on this topic. Thank you.

unity – Only 1 TextMeshProUGUI being affected by coroutine

I have a prefab m_wordContainer which is empty other than having a TextMeshProUGUI as a child (I did this so that I could position this container freely not being bugged by an animation played by the TMPUGUI object). At the start of the program I start a coroutine (SpawnWordPopups) which splits the words in a sentence, making a copy of m_wordContainer and assigning that word to the text property of its TMPUGUI, spawning such m_wordContainer in a random location within the screen, and then starting a coroutine (FadeText) that fades in the text of every TMPUGUI.
The problem is that only one of the TMPUGUI has its text faded, even though the FadeText coroutine is called seemingly correctly.
All words are shown as expected, only thing missing is the fading effect on all but the first word that fades.
If I called FadeText like this: if (i > 0) { yield return FadeText(true, tmpText, 2); } then only the second word will be faded.

 [SerializeField] private GameObject m_wordContainer; // prefab: empty container with a TextMeshProUGUI child
 
    void Start()
    {
        // NOTE(review): `yield return` is only legal in an iterator method, so as
        // written this needs `IEnumerator Start()` (Unity runs that as a coroutine)
        // — or drop the `yield return` and just call StartCoroutine. Confirm
        // against the original post; the attribute brackets above were restored
        // from extraction mangling ((SerializeField) -> [SerializeField]).
        yield return StartCoroutine(SpawnWordPopups("test foo bar"));
    }
 
// Spawns one word-container prefab per word of l_line at a random,
// non-overlapping screen position, then fades in each word's text.
// NOTE(review): `[]` was mangled to `()` by extraction (string[], words[i]);
// restored here. Also note the sequential `yield return FadeText(...)` makes
// each fade finish before the next word spawns; starting each fade with
// StartCoroutine(FadeText(...)) would fade all words concurrently.
private IEnumerator SpawnWordPopups(string l_line)
    {
        string[] words = l_line.Split(' ');
        List<Vector3> wordPositions = new List<Vector3>();
        for (int i = 0; i < words.Length; ++i)
        {
            Vector3 spawnLocation = new Vector3(Random.Range(m_wordScreenDistanceOffset, Screen.width - m_wordScreenDistanceOffset), Random.Range(m_wordScreenDistanceOffset, Screen.height - m_wordScreenDistanceOffset), 0);
            // Re-roll this index when the position overlaps a previous word.
            if (IsWordOverlapping(wordPositions, spawnLocation))
            {
                --i;
                continue;
            }
            wordPositions.Add(spawnLocation);
            GameObject wordContainer = Instantiate(m_wordContainer, transform.parent);
            wordContainer.transform.position = spawnLocation;
            TextMeshProUGUI tmpText = wordContainer.GetComponentInChildren<TextMeshProUGUI>();
            tmpText.text = words[i];
 
            yield return FadeText(true, tmpText, 2);
        }
    }
 
    // Fades l_textBox's alpha from 0 to 1 (fade in) or 1 to 0 over l_time seconds.
    private IEnumerator FadeText(bool l_fadeIn, TextMeshProUGUI l_textBox, float l_time)
    {
        float time = Time.time;
        float until = Time.time + l_time;
        int alphaFrom = l_fadeIn ? 0 : 1;
        int alphaGoal = l_fadeIn ? 1 : 0;
        // Snap to the starting alpha before the loop begins.
        Color textColor = l_textBox.color;
        textColor.a = l_fadeIn ? 0 : 1;
        l_textBox.color = textColor;
 
        while (time < until)
        {
            textColor = l_textBox.color;
            // NOTE(review): the interpolant is absolute Time.time / l_time, not the
            // elapsed fraction since this fade started. Once Time.time >= l_time
            // (i.e. after the first couple of seconds of play), Lerp's t clamps to 1
            // and every later fade completes instantly — this matches the reported
            // "only the first word fades" symptom. Expected something like
            // (Time.time - startTime) / l_time. Confirm before fixing the post.
            textColor.a = Mathf.Lerp(alphaFrom, alphaGoal, Time.time / l_time);
            l_textBox.color = textColor;
            // NOTE(review): `time` starts as an absolute timestamp but accumulates
            // deltaTime; it only terminates correctly because `until` is absolute too.
            time += Time.deltaTime;
            yield return null;
        }
        // Ensure the final alpha is exact regardless of frame timing.
        textColor.a = l_fadeIn ? 1 : 0;
        l_textBox.color = textColor;
    }

Could anyone shed some light into this issue I’m having? Why are not all the words being faded?

unity – Implement navmesh for climbing cliffs and walls

I am trying to implement dynamic wall climbing similar to Conan exiles and Legend of Zelda: Breath of the Wild.

My system as of now uses a raycast to get the direction of a geometry’s normal, and I make the player face that. I send another raycast in the direction of the players input, which takes the hit point, and lerps the player to that ray’s hit position on the geometry.

Although this system works well on a flat wall it struggles a lot on complex geometry such as a bumpy cliff, and constantly hangs on small edges.

I recently saw this video from the cancelled game Worlds Adrift that seems to implement some sort of navmesh by making a flat plane that follows the terrain’s normals. This allows the player to climb over gaps and complex geometry as if it was a smooth surface.

Clip from Worlds Adrift showing navmesh patch

I don’t completely understand how they managed this, and I was wondering if someone who is knowledgeable about these things could explain how I could implement something like this in Unity.

unity – Panning value of 2nd GameObject’s AudioSource is not initializing

I'm making a prototype and am stuck on a problem: two GameObjects swap positions, but the panStereo values of their AudioSources are not being initialized from a float array that holds the default panStereo values of every GameObject in the scene.

 // NOTE(review): array indexers were mangled from `[]` to `()` by extraction;
 // restored below. Cache the default panStereo values before the bowls swap.
 panningArray = new float[Inventory.bowlsManager.BowlPanningValues.Length];
    for (int i = 0; i < Inventory.bowlsManager.BowlPanningValues.Length; i++)
    {
        panningArray[i] = Inventory.bowlsManager.BowlPanningValues[i];
    }
    Selectable = false;
    // Force full 3D spatialization on both swapped bowls.
    // NOTE(review): with spatialBlend = 1 the source is fully 3D, and panStereo
    // only affects 2D (or the 2D portion of blended) sounds — this is the likely
    // reason the pan values appear not to initialize. Confirm against the
    // AudioSource.panStereo documentation.
    SelectedBowl.GetComponent<AudioSource>().spatialBlend = SelectedBowl2.GetComponent<AudioSource>().spatialBlend = 1;
    iTween.MoveTo(SelectedBowl, iTween.Hash("position", temp2, "time", transitionspeed));
    iTween.MoveTo(SelectedBowl2, iTween.Hash("position", temp, "time", transitionspeed));

    SelectedBowl.GetComponent<Renderer>().material = SubsituteMaterial;
    SelectedBowl2.GetComponent<Renderer>().material = SubsituteMaterial;
    // Find each bowl's index so its counterpart's cached default pan can be applied.
    int SelectedBowlIndex = Array.FindIndex(Inventory.allBowls.ToArray(), x => x == SelectedBowl.GetComponent<Bowl>());
    
    int SelectedBowlIndex2 = Array.FindIndex(Inventory.allBowls.ToArray(), x => x == SelectedBowl2.GetComponent<Bowl>());
    print(Inventory.bowlsManager.BowlPanningValues[SelectedBowlIndex2]);
    print(Inventory.bowlsManager.BowlPanningValues[SelectedBowlIndex]);

    // Swap pan: each bowl receives the other's cached default panning value.
    val1 = BowlPanning(SelectedBowl2, panningArray[SelectedBowlIndex]);
    val2 = BowlPanning(SelectedBowl, panningArray[SelectedBowlIndex2]);

Here is My Bowl panning Function:

 // Sets the stereo pan of BowlRequire's AudioSource to `value` and returns the
 // value read back from the component.
 public float BowlPanning(GameObject BowlRequire,float value)
{
    BowlRequire.GetComponent<AudioSource>().panStereo = value;

    return BowlRequire.GetComponent<AudioSource>().panStereo;

}

unity – 2D Finding an algorithm to check pizza toppings positions

Using unity 3D I am creating a 2D pizza game and I am stuck at finding an algorithm to solve this problem which is Detecting if the pizza is half and half taking into consideration that the user can put the toppings in all rotations shown in the picture and lot more possible distributions.

I used Physics2D.OverlapAreaAll to get the positions of the ingredients on the pizza. I tried computing sumX and sumY of the coordinates of all topping A and of all topping B, then adding A.sumX + B.sumX and A.sumY + B.sumY; if both totals are between 0 and 1, I treat A and B as being on opposite sides. However, the bad distribution of toppings in the second picture is also accepted by my algorithm. The toppings must be spread like in the first picture.

I need some easier way to detect the correct distribution of ingredients maybe using collisions or something.

// Accept as half-and-half when the summed topping coordinates roughly cancel out.
// NOTE(review): a near-zero coordinate sum only proves the toppings are balanced
// around the center, not that each half holds a single topping type — which is
// exactly the false positive described above (the "bad distribution" picture
// also sums to ~0).
if (sumX > -ErrLvl && sumX < ErrLvl && sumY > -ErrLvl && sumY < ErrLvl)  
            {
                Debug.Log("APPROVED HALF-HALF PIZZA");
            }
            else
                Debug.Log("BAD HALF-HALF PIZZA");

Correct distribution

Bad distribution

unity – How to remap Ctrl-Alt-Tab to Ctrl

Long story short: I have MX master 2s mouse whose thumb button sends <ctrl><alt><tab>.

I want to remap the thumb button to <ctrl> but that does not seem to be possible because there are no supported software for linux.

Hence I would like to do the next best thing, which is to try to remap <ctrl><alt><tab> to <ctrl> and hope it works.

Is there any way to achieve what I want?

OS: Ubuntu 20.04 with Unity

Apparently, Unity decided to import a bunch of things I already had

enter image description here

I am getting lots of errors ever since Unity decided to reimport plugins and Unity assemblies that were already present in my project. I am not sure what I can now do, what folders to delete to fix this issue, etc. Is there any simple solution to fixing all of these problems? I cannot even click play in the Unity Editor now.

c# – Unity, Pixel Perfect and Scaling

I am totally stuck on the pixel-perfectness of my game. I have tried a few pixel-perfect camera assets, but they have many issues regarding scalability. Is it normal that these cameras zoom in and out instead of scaling the camera screen when using different resolutions? This is a huge problem for me, because I need to make a game for a 16:10 aspect ratio, and every resolution with that aspect ratio needs to scale correctly while keeping the pixel-perfectness. The orthographic camera size is dependent on the screen width (width / PPU / 2), so it always zooms in or out at different resolutions.

Question is: Is there a way to make it scale the game sprites instead of zooming the view?

unity – Get MouseUp event outside of Sceneview scene window?

So I have an editor script that uses a dragging operation which works quite nicely. Except, if you release the button while OUTSIDE the Scene Window, the event doesn’t register.

How do I detect mouseup outside of the scene window?

// Scene-view GUI callback; reads the current IMGUI event for this scene view.
// NOTE(review): Event.current here presumably only carries events routed to
// this scene view's GUI, which would explain why a MouseUp released outside
// the window never arrives — confirm against the SceneView/Event docs.
private static void OnScene(SceneView sceneview)
{
    Event e = Event.current;
    if (e.type == EventType.MouseDrag)
    {
        //do some drag stuff
        e.Use();
    }

    // Right-button release (e.button == 1) inside the scene view.
    if (e.type == EventType.MouseUp && e.button == 1)
    {
        //is the Right Mouse button released?
    }
}

I suspect somehow it’s due to my Event e being inside the OnScene making the mouse actions only existing in the context of the Scene Window perhaps? I have no idea, very new to Editor scripting…

unity3d – C# and unity. Moving an object to a target and destroy target once we’re close

This is my first ever c# script. I have played around a little and I think this is some proper c# code. Since it’s my very first c# ever, I’m assuming there’s a few mistakes, things that could be better, bad conventions and such. If you see any, please let me know so I can improve!

using System.Collections;
using System;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.AI;

// Moves the object this script is attached to toward `target` via a
// NavMeshAgent and destroys the target once within range.
// NOTE(review): the attribute brackets were mangled to parentheses by
// extraction; restored to [SerializeField]. Logic left as posted (it is the
// subject of the review request).
public class Mover : MonoBehaviour
{
    [SerializeField] GameObject target;  
    private GameObject player; // creating a variable for the player object
    
    void Update()
    {
        if (target != null) { // if the target is already destroyed, we don't have to do anything anymore
            player = this.gameObject; // the script is attached to the player
            

            // Here I expected I could get the NavMeshAgent component via something like player.NavMashAgant. 
            // I do the same later for target.transform. 
            // Is there a reason this component is not accessible via dot notation?
            GetComponent<NavMeshAgent>().destination = target.transform.position; // set the players destination as the targets potion. It starts moving automatically. 
            if (AreClose(player, target)) { 
                Destroy(target);
            }
        }
    }

    // Euclidean distance between two GameObjects' world positions.
    private static double Distance(GameObject a, GameObject b)
    {
        Vector3 aPos = a.transform.position;
        Vector3 bPos = b.transform.position;
        // A really long line.. What is the convention to make this more readable?
        return Math.Sqrt(Math.Pow(aPos.x - bPos.x, 2) + Math.Pow(aPos.y - bPos.y, 2) + Math.Pow(aPos.z - bPos.z, 2));

    }

    // True when the two objects are within the (magic-number) pickup radius.
    private static bool AreClose(GameObject a, GameObject b)
    {
        double result = Distance(a, b);
        if (result < 2.5) // random magic number for now
        {
            return true;
        } else
        {
            return false;
        }
    }
}