Final Project – Erin Fuller

My Final Project was a maze generator that you can solve!

How to Play: Wait for the maze generator to run in the background; once the loading screen is gone, you are ready to play. Use your arrow keys to navigate the green dot through the maze to the red dot.

//Erin Fuller
//Section A
//efuller@andrew.cmu.edu
//Final Project

//This Maze Generator + Game uses depth-first search with recursive
//backtracking to generate the maze.

//https://en.wikipedia.org/wiki/Maze_generation_algorithm#Depth-first_search
//https://en.wikipedia.org/wiki/Maze_generation_algorithm#Recursive_backtracker

var cols, rows; // dividing canvas into grid array
var w = 30; // cell width
var grid = []; 

var sX = 0; // solver x position
var sY = 0; // solver y position

var distance = 0, longdist = 0;
var longest;
var current;
var mazeGen = false; //maze generator not finished

var stack = []; // stack of cells along the path currently being generated

// wall array directions 
var up = 0; 
var right = 1;
var down = 2;
var left = 3;

function setup() {
    createCanvas(480, 480);
    cols = width / w;
    rows = height / w;
    for (var i = 0; i < rows; i++) {
        for (var j = 0; j < cols; j++) {
            var cell = new Cell(j, i); //creates cell object
            grid.push(cell); //pushes object into grid array
        }
    }
    current = grid[0]; //starts grid at top left corner
    current.visited = true;
}

function draw() {
    mazeDraw(); 
    if (mazeGen) { 
        solve(); //when the maze is finished generating, call the solve function
    } else {
        preText(); //show instructions while the maze is generating
    }
}

function mazeDraw() {
    background(165, 202, 218); //LIGHT BLUE
    for (var i = 0; i < grid.length; i++) {
        grid[i].show();
    } 
    // Step 1: randomly choose one of the unvisited neighbors
    var next = current.checkNeighbors();
    if (next) {
        next.visited = true;
        // Step 2: push the current cell to the stack
        stack.push(current);
        // Step 3: Remove wall between current cell and chosen cell
        removeWalls(current, next);
        // Step 4: Make chosen cell the current cell and mark as visited
        current = next;
        distance++;
    } else {
        if (stack.length > 0) {
            current = stack.pop();
        } else {
            current = grid[0];
            mazeGen = true;
        }
    }
    if (longdist < distance) {
        longdist = distance;
        longest = current;
    }
}

function index(i, j) {
    if (i < 0 || j < 0 || i > cols - 1 || j > rows - 1) {
        return -1; //return negative/invalid index
    } else {
        return i + j * cols;
    }
}

function Cell(i, j) {
    this.i = i;
    this.j = j;
    this.walls = [true, true, true, true]; // N, E, S, W
    this.visited = false;

    this.checkNeighbors = function() {
        var neighbors = [];
        var n = grid[index(i, j - 1)]; //top neighbor (N)
        var e = grid[index(i + 1, j)]; //right neighbor (E)
        var s = grid[index(i, j + 1)]; //bottom neighbor (S)
        var wst = grid[index(i - 1, j)]; //left neighbor (W), named wst so it
        //does not shadow the global cell width w

        //Nested ifs are used instead of the && operator because WordPress mangles && when displaying code
        if (n) {
            if (!n.visited) {
                neighbors.push(n);
            }
        }
        if (e) {
            if (!e.visited) {
                neighbors.push(e);
            }
        }
        if (s) {
            if (!s.visited) {
                neighbors.push(s);
            }
        }
        if (wst) {
            if (!wst.visited) {
                neighbors.push(wst);
            }
        }

        if (neighbors.length > 0) {
            var r = floor(random(0, neighbors.length));
            return neighbors[r];
        } else {
            return undefined;
        }
    }

    this.show = function() {
        var x = this.i * w;
        var y = this.j * w;
        
        stroke(0);
        strokeWeight(1);

        if (this.walls[up]) {
            line(x, y, x + w, y); // top line (N)
        }
        if (this.walls[right]) {
            line(x + w, y, x + w, y + w); //right line (E)
        }
        if (this.walls[down]) {
            line(x + w, y + w, x, y + w); //bottom line (S)
        }
        if (this.walls[left]) {
            line(x, y, x, y + w); //left line (W)
        }         
    }
}

function removeWalls(a, b) {
    var x = a.i - b.i; //x = difference in column between current cell and its neighbor
    if (x === 1) {
        a.walls[left] = false; //remove left/west wall on current
        b.walls[right] = false; //removes right/east wall on neighbor
    } else if (x === -1) {
        a.walls[right] = false; //remove right/east wall on current
        b.walls[left] = false; //removes left/west wall on neighbor
    }

    var y = a.j - b.j; //y = difference in row between current cell and its neighbor
    if (y === 1) {
        a.walls[up] = false; //remove up/north wall on current
        b.walls[down] = false; //removes down/south wall on neighbor
    } else if (y === -1) {
        a.walls[down] = false; //remove down/south wall on current
        b.walls[up] = false; //removes up/north wall on neighbor
    }
}

function keyPressed() {
    var solver = grid[index(sX, sY)]; //cell the player currently occupies

    //Nested ifs are used instead of the && operator because WordPress mangles && when displaying code
    if (keyCode === LEFT_ARROW) {
        if (!solver.walls[left]) {
            sX--;
        }
    }

    if (keyCode === RIGHT_ARROW) {
        if (!solver.walls[right]) {
            sX++;
        }
    }

    if (keyCode === UP_ARROW) {
        if (!solver.walls[up]) {
            sY--;
        }
    }

    if (keyCode === DOWN_ARROW) {
        if (!solver.walls[down]) {
            sY++;
        }
    }
}

function solve() { 
    var a = 0.5 * w;
    noStroke();

    fill(214, 247, 52); //green "solver" dot
    var gX = a + (w * sX); //controlled by arrow keys
    var gY = a + (w * sY);
    ellipse(gX, gY, a, a);

    fill(250, 29, 59); //red "end" dot
    var rX = a + (w * longest.i);
    var rY = a + (w * longest.j);
    ellipse(rX, rY, a, a);

    if (dist(gX, gY, rX, rY) < a) { //if the green dot reaches the red dot
        winner();
    }
}

function preText() {
    fill(165, 202, 218); //blue background
    rect(0, 0, width, height);

    textAlign(CENTER);
    
    noStroke();
    fill(255);
    textSize(75);
    text('Get Ready.', width / 2, height / 2);

    textSize(20);
    text('The maze is loading. Use your arrow keys to navigate the green dot to the red dot.', 
        width / 2 - 150, height / 2 + 45, 300, 200);
}

function winner() {
    fill(165, 202, 218); //blue background
    rect(0, 0, width, height);

    textAlign(CENTER);
    noStroke();
    fill(255);

    textSize(87);
    text('YOU WON!', width / 2, height / 2);

    textSize(25);
    text('Refresh to play again.', width / 2, height / 2 + 45);
}

This was a big task for me. The maze is generated using recursive backtracking, a form of depth-first search (more info can be found on the Wikipedia pages linked above). In short, the canvas is gridded into cells, and the generator repeatedly moves to a randomly chosen unvisited neighbor cell, removing the wall between the two, until every cell has been visited and a maze is created. Many of these concepts were new to me, so big thanks to Professor Dannenberg for pointing me in the right direction and to Dan Shiffman's Coding Train for having some material on this.
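
Distilled from the frame-by-frame code above, the core of the generator is just this loop (a condensed sketch of the same logic, run all at once rather than one step per draw() frame):

//Condensed sketch of the generator above (same logic, not animated):
function generateMaze() {
    var current = grid[0];
    current.visited = true;
    var stack = [];
    while (true) {
        var next = current.checkNeighbors(); //random unvisited neighbor
        if (next) {
            next.visited = true;
            stack.push(current);
            removeWalls(current, next);
            current = next;
        } else if (stack.length > 0) {
            current = stack.pop(); //dead end: backtrack
        } else {
            break; //stack empty: every cell has been visited
        }
    }
}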

The end goal is meant to be the furthest point along the maze from the origin. The player begins in the top left corner and moves along the maze to the red dot. Getting the solver dot to stay inside the lines was probably the biggest challenge.

Some Errors: The red dot that signifies the target is supposed to be the furthest distance in the maze from the start. Sometimes it works, but sometimes the target is very close to the origin. I think this is because the generator can classify the last cell it visits as the furthest even though that cell may be very close: the distance counter is only ever incremented, so it really measures how many cells have been visited rather than the current path length back to the origin.
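
One possible fix (an untested sketch against the variables above): decrement distance on every backtrack so it always equals the current path length from the origin, and only check for a new record when stepping forward:

var next = current.checkNeighbors();
if (next) {
    next.visited = true;
    stack.push(current);
    removeWalls(current, next);
    current = next;
    distance++; //one step further from the origin
    if (distance > longdist) { //only a forward step can set a record
        longdist = distance;
        longest = current;
    }
} else if (stack.length > 0) {
    current = stack.pop();
    distance--; //backtracking moves back toward the origin
} else {
    current = grid[0];
    mazeGen = true;
}

Since distance would then always equal stack.length, the counter could even be dropped in favor of checking the stack's length directly.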

Looking Outwards 12 rrandell

http://www.liaworks.com/category/theprojects/

Lia’s direct page^

http://www.liaworks.com/videos/animal-imagination/

Lia’s animal imagination video^

http://sputniko.com/biography/

Sputniko!’s direct page ^

http://sputniko.com/2016/04/redsilkfate/

For my project I am kind of interested in combining generative and interactive art that uses the webcam. I was looking at some artists who do similar things, and I came across the artist Lia. She uses interesting tools to generate art, but she doesn't combine these projects with people very much (in the sense that I would like to); still, I am very intrigued by her methods, like the mechanical plotter drawings. Her most interesting piece to me is her generative video 'animal imagination'. This video is essentially a moving drawing which is very soothing (at some points) and hectic (at other points). It looks almost like animated turtle graphics, with spirals and almost slinky-looking patterns. I am interested in possibly exploring these shapes as part of my final project and in learning more about how they move and work. I am also interested in the Japanese artist Sputniko!, who uses video art to convey her ideas, as in her video red silk of fate, which was based on a project she made. The video acted as a supplement to her project, and it was interesting to see how she could use video as a part of her work, as I am thinking of using video the way she does in my project.

Final Project Proposal rrandell

For my final project I would like to make some sort of interactive art using the camera and possibly emotion recognition. I thought that the camera assignment was really interesting and totally rewarding, as it was so different from the previous assignments. I think it may be interesting to make interactive art that responds when the person in the camera is smiling or frowning or making a face that clearly shows some emotion, for example, really colorful fireworks when the person in the frame is smiling, or raindrops when the person is frowning. I believe that I could make some really interesting forms that generate based on the expression on a person's face, and that it will be interesting to explore human emotion further with code. One resource on campus that may be able to help me is Angela Washko, a teacher at the School of Art who has a lot of experience in new media art and may be a good person to ask when researching this further.
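
A rough structural sketch of the idea in p5.js (classifyEmotion is a hypothetical placeholder for whatever face/emotion library would actually do the detection; it is not a real API):

var video;

function setup() {
    createCanvas(480, 480);
    video = createCapture(VIDEO); //p5.js webcam capture
    video.size(width, height);
    video.hide(); //the frames are drawn manually in draw()
}

function draw() {
    image(video, 0, 0, width, height);
    var mood = classifyEmotion(video); //hypothetical: stands in for a real classifier
    if (mood === 'happy') {
        drawFireworks();
    } else if (mood === 'sad') {
        drawRaindrops();
    }
}

function drawFireworks() {
    //e.g. bursts of colorful points
}

function drawRaindrops() {
    //e.g. falling blue lines
}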

Below is a small sketch to demonstrate; apologies for the poor drawing skills!

Looking Outwards 12 Liz Maday

I am using one of my late days for this post.

One exciting project that I found was the live visuals performed by Mary Franck which accompanied electronic music performed by an assortment of artists, including Holly Herndon, Mike Gao, Luke Dahl, Colin Sullivan, Jennifer Hsu & Locky. This took place at the Modulations showcase in 2013 (an annual event that is hosted by Stanford’s Center for Computer Research in Music and Acoustics).

The performance involved a large screen which was placed behind the musicians on stage, and displayed on the screen was a progression of colorful, moving, geometric designs and dream-like images. The live visuals were created using a TouchDesigner program that Franck developed, known as Rouge, which is used for video performance and creating realtime 3D compositions.

I think this kind of audiovisual performance is really cool because of the way it feels liberating: all the artists involved are able to realize spontaneous ideas and bring them to life. This project is relevant to my final project because it uses both sound and visuals to create a whole experience.

Another project that is inspiring to me is the tape bow violin, invented by Laurie Anderson in 1977, which had a large influence on the integration of music and technology. She replaced the horsehair on the violin bow with magnetic tape and put a tape head on the bridge of the violin. Using this technology, she was able to create non-traditional sounds with the violin by manipulating the sounds captured on the tape (for example, sound could be played backwards or forwards, and parts of it could be rearranged). I think this project is relevant to my final project because of the way it deals with changing conceptions of the sound that can be produced with a traditional instrument. In my project, I intend to use a visual concept of an instrument that has some traditional characteristics and pair it with non-traditional sounds (produced by the user's keyboard).
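
The backwards/forwards idea has a simple digital analogy in p5.sound, where a negative playback rate reverses a sample (a sketch; melody.mp3 is a placeholder filename):

var song;

function preload() {
    song = loadSound('melody.mp3'); //placeholder filename
}

function setup() {
    createCanvas(200, 200);
    song.loop();
}

function keyPressed() {
    if (keyCode === LEFT_ARROW) {
        song.rate(-1); //negative rate plays the sample backwards
    }
    if (keyCode === RIGHT_ARROW) {
        song.rate(1); //normal forward playback
    }
}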

A visual representation of the tape bow violin.

I think both of these artists' works are interesting to compare because they come from different eras, but both advanced the ways in which technology can be integrated with music.

Yiran Xuan – Looking Outwards 12

Greg Borenstein's work is quirky and fun, utilizing lofty technology to bring tokens of enjoyment to life. One of these is Ten Seconds, a short game in which your player ball jumps around trying to buy more time by obtaining collectibles while avoiding hazards. Built around one interesting and "perpetual" mechanic, this game has the same spirit as the likes of Temple Run, Flappy Bird, and Tiny Wings, "toilet games" as I like to call them, capable of keeping one amused for the duration of a sit but also having the potential to be competitive and give tryhards a challenge. This is the type of game I aspire for my own project to be, though mine does not have a "perpetual" mechanic. I wish to emulate the clean style and effective sound design of the game, though I wish Mr. Greg would remove gravity and maybe add more depth to the levels, so that it would feel almost like an exploration campaign rather than a boxed and contained experience.

Katherine Hua – Project-12 – Proposal

With my proposed collaborator, Min Lee (mslee) from Section A, we are going to make an interactive visual representation of a song (yet to be decided). We want a song that gives off a peaceful and dreary vibe. The visual part will be a sound spectrum, based on the wavelengths of the sound, set in the middle of the canvas. The spectrum will be made up of small circles of varying colors that are constantly drowned out by the background (to give it a look of lagging behind, further reinforcing the dreary, peaceful vibe intended). For the interactive component, we want to create layers of sound. We will find the individual instruments that come together to make up the song (flute, percussion, voice, etc.). The representation will start off with just one layer (one instrument), and with each click of the mouse a layer of sound will be added. With each layer of sound, a layer of color will be added to the visual representation as well. We are thinking of giving the circles lower opacities so that different colored circles can overlap to form new colors; of course, this means we need to choose colors that actually combine into pretty colors. Below is a quick sketch of what we expect our project to look like.

Our sketch of our project
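
A minimal sketch of the layering mechanic, assuming p5.sound and placeholder stem filenames: every stem starts looping at once (all but the first muted), so unmuting one per click keeps the layers in sync.

var layers = []; //one p5.SoundFile per instrument stem
var active = 1; //how many layers are currently audible

function preload() {
    //placeholder filenames, one stem per instrument
    layers.push(loadSound('flute.mp3'));
    layers.push(loadSound('percussion.mp3'));
    layers.push(loadSound('voice.mp3'));
}

function setup() {
    createCanvas(480, 480);
    for (var i = 0; i < layers.length; i++) {
        layers[i].setVolume(i < active ? 1 : 0); //mute all but the first
        layers[i].loop(); //start every stem together so they stay in sync
    }
}

function mousePressed() {
    if (active < layers.length) {
        layers[active].setVolume(1); //unmute the next layer
        active++;
    }
}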


Audrey Zheng – Looking Outwards – 12

This is an audio visualizer created by Willian Justen, Luis Henrique, and Marcio Ribeiro that I found on GitHub. Music visualization, a feature found in electronic music visualizers and media player software, generates animated imagery based on a piece of music. The imagery is usually generated and rendered in real time and synchronized with the music as it plays.

View the page here.

I enjoyed this project because it was cathartic just looking at the page. I liked how the low, loud beats corresponded to larger ellipses forming on the screen. From an artistic standpoint, the project is aesthetically pleasing and calming.
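
The bass-to-ellipse mapping can be reproduced with p5.sound's FFT analyzer (a sketch of the general technique, not the authors' code; track.mp3 is a placeholder):

var song;
var fft;

function preload() {
    song = loadSound('track.mp3'); //placeholder filename
}

function setup() {
    createCanvas(480, 480);
    fft = new p5.FFT();
    song.loop();
}

function draw() {
    background(0);
    fft.analyze(); //must run before getEnergy()
    var bass = fft.getEnergy('bass'); //0-255 energy in the low band
    var d = map(bass, 0, 255, 10, width);
    noStroke();
    fill(255);
    ellipse(width / 2, height / 2, d, d); //louder lows, bigger ellipse
}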

This is a weather visualizer made by George Edmonds. It is rather simple in execution: the site fetches weather data and then chooses from a set of hardcoded ice creams to visualize it. Still, I thought it was a fun idea.
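
The structure is roughly this (a guess at the shape of it, not Edmonds' code; the URL and the condition field are placeholders for a real weather API):

var condition = 'sunny'; //assumed response field, with a default

function setup() {
    createCanvas(400, 400);
    //placeholder endpoint; a real API URL and key would go here
    loadJSON('https://example.com/weather?city=Pittsburgh', gotWeather);
}

function gotWeather(data) {
    condition = data.condition; //e.g. 'sunny', 'rain', 'snow'
}

function draw() {
    background(200, 225, 255);
    //pick a hardcoded ice cream to match the weather
    if (condition === 'rain') {
        fill(139, 90, 43); //chocolate for rainy days
    } else {
        fill(255, 230, 150); //vanilla otherwise
    }
    ellipse(width / 2, height / 2 - 40, 120, 120); //the scoop
    fill(222, 184, 135);
    triangle(width / 2 - 55, height / 2 + 10,
        width / 2 + 55, height / 2 + 10,
        width / 2, height - 60); //the cone
}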

Looking Outwards 12 – Min Lee

I am interested in creating a visual representation with audio, much like Ren Yuan does in her project Sorting. Using Processing, she sorted copious amounts of data and created visual and audio aids to pair with the actual data being sorted. In Studio Antimateria's Shape in Scapes, a similar use of data visualization is at work, with students' architectural projects at three different locations being abstractly demonstrated. Although I believe the visual and audio representations were added to hold the viewer's fascination while watching the project, they play just as important a role in keeping the user engaged in what would otherwise be a very confusing experience. For my project, I also want to create visual and audio accompaniment to some third factor.
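
As a note on how such a pairing might work, here is a toy sketch along those lines (my own example, not Ren Yuan's method): bubble sort advanced one comparison per frame, with an oscillator pitch tracking the value being moved.

var values = [];
var i = 0; //bubble sort outer index
var j = 0; //bubble sort inner index, advanced one step per frame
var osc;

function setup() {
    createCanvas(400, 200);
    for (var k = 0; k < 80; k++) {
        values.push(random(height));
    }
    osc = new p5.Oscillator();
    osc.setType('sine');
    osc.start();
    osc.amp(0.2);
}

function draw() {
    background(0);
    //one comparison per frame so the sort is visible and audible
    if (i < values.length) {
        if (j < values.length - i - 1) {
            if (values[j] > values[j + 1]) {
                var tmp = values[j];
                values[j] = values[j + 1];
                values[j + 1] = tmp;
            }
            osc.freq(map(values[j], 0, height, 100, 1000)); //value -> pitch
            j++;
        } else {
            j = 0;
            i++;
        }
    } else {
        osc.amp(0); //sort finished: silence
    }
    stroke(255);
    for (var k = 0; k < values.length; k++) {
        line(k * 5, height, k * 5, height - values[k]); //value -> bar height
    }
}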

Sources:

Sorting – Visualisation, sortification and sonification of an algorithm

Shape in Scapes – Transporting architecture into audio-video performance

Katherine Hua – Looking Outwards – 12

“Funky Forest” by Theo Watson and Emily Gobeille (2007)

The first project that I admire is called Funky Forest, created by Emily Gobeille and Theo Watson, creators of high-end interactive installations for children. Funky Forest is an art installation in the Singapore Art Museum acting as an interactive ecosystem in which children can bring trees to life through their bodies and use physical logs to control the flow of water from the waterfall to the trees. The children use this water to keep the trees alive, and the health of the forest and everything that resides within it relies on the basic health of the trees. Funky Forest is an interactive and collaborative experience that lets children create their own stories and go on their own fantastical adventures. I enjoy this project because it places an emphasis on meaningful interaction and systems built to support open play and discovery, while creating a sense of wonder and delight at the same time.
“Replicants” by Lorna Barnshaw (2013)
a printed 3D scan of a human face
a printed result of one of the digital methods
The second project that I admire is the work of Lorna Barnshaw, a virtual/glitch sculptor. She uses 3D technology to print three very different sculptures, using herself as the model. In her series Replicants, Barnshaw fuses self-portraiture with 3D technology to create sculptures with an eerie, sub-human feel: she leans on computer glitches that keep the scan from being 3D printed accurately, resulting in sculptures whose humanistic qualities are distorted. I admire this project because it finds a platform for fine art in an area where 3D technology is revolutionizing the science, medical, and design worlds.


Yiran Xuan – Project 12 – Proposal

My project idea is to recreate the schoolyard clapping game "Shotgun" in Javascript. The game consists of rounds; in each round, two (or more) players face each other and simultaneously play one of three moves: "Shoot", "Reload", and "Shield". Shielding blocks all incoming bullets and can be used in any round; reloading adds one bullet to the chamber but leaves the player vulnerable; shooting fires a bullet toward the (or an) opponent, but can only be used if the player has at least one bullet in the chamber already. The game ends when one player plays shoot while their target is reloading. This game has a luck element but is also strategic, with good players able to predict when their opponents will be left vulnerable. Another important element of the game is clapping, which helps ensure simultaneous play; typically, players clap their hands on their thighs twice before making their move, establishing a beat.
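
The round logic condenses to a small function (a sketch of the rules as described; the player objects holding a move and an ammo count are my own invention):

//Each player is assumed to be an object like:
//{ move: 'shoot' | 'reload' | 'shield', ammo: 0 }
function resolveRound(p1, p2) {
    //a shot only actually fires if there is a bullet in the chamber
    var p1fires = p1.move === 'shoot' && p1.ammo > 0;
    var p2fires = p2.move === 'shoot' && p2.ammo > 0;
    if (p1fires) p1.ammo--;
    if (p2fires) p2.ammo--;
    if (p1.move === 'reload') p1.ammo++;
    if (p2.move === 'reload') p2.ammo++;
    //only a reloading player is vulnerable: shield blocks the shot,
    //and two simultaneous shots pass each other harmlessly
    if (p1fires && p2.move === 'reload') return 'player 1 wins';
    if (p2fires && p1.move === 'reload') return 'player 2 wins';
    return 'next round';
}
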

For this project, I intend to represent the two players as two animated dragons spitting fireballs at each other. Players would have 4 separate key each to play, 3 for the moves, and 1 for “clapping”. I will play a short sound regularly to establish a beat, and players would need to press the “clap” button within a certain time frame, or they suffer consequences (like a rhythm game). I would need counters to keep track of the refresh rate. I will also build a random move-generating AI. For most of the interface, I will draw outside of the program and import in as images.