Project 12 – Yugyeong Lee

I will be collaborating with Nahyun Kim for the final project.

For this project, we are planning to create an interactive program that reacts to sound. Possibly using audio input from the built-in microphone, the program will detect frequency, amplitude, and other properties of the signal to generate different graphical animations. Using p5.sound functions, we will load sound, read audio from an input source, get the current volume, analyze the frequency content, and use these values to control the size, color, and location of the graphical elements. The interesting aspect of this project is exploring sound-related functions that we have not worked with in past projects. If we end up loading recorded music to drive the program (instead of using the built-in microphone), we will use mouse presses to change to the next song, either by clicking different regions of the canvas or different icons. Once the basic sound reactor is set up, we will explore a variety of ways of displaying the different variables of sound and hope to add more interactive aspects to the project.
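As a rough sketch of the approach we have in mind (not final code), the minimal p5.js example below uses p5.sound's AudioIn, Amplitude, and FFT objects; the specific mapping of volume and bass energy to circle size and color is only a placeholder assumption.

//rough sketch (assumes the p5.sound library is loaded alongside p5.js)
var mic;	//microphone input
var amp;	//amplitude (volume) analyzer
var fft;	//frequency analyzer

function setup() {
    createCanvas(480, 360);
    mic = new p5.AudioIn();		//built-in microphone as the input source
    mic.start();
    amp = new p5.Amplitude();
    amp.setInput(mic);
    fft = new p5.FFT();
    fft.setInput(mic);
}

function draw() {
    background(20);
    var volume = amp.getLevel();		//current volume, 0.0 to 1.0
    fft.analyze();						//update the frequency spectrum
    var bass = fft.getEnergy("bass");	//energy in the bass band, 0 to 255
    //placeholder mapping: volume controls size, bass energy controls color
    var diam = map(volume, 0, 1, 10, height);
    noStroke();
    fill(bass, 100, 200);
    ellipse(width / 2, height / 2, diam, diam);
}

In recent browsers, audio input only starts after a user gesture, so the sketch would likely also need userStartAudio() or an initial mouse press before the microphone becomes active; if we switch to loaded songs instead, loadSound() in preload() and a mousePressed() handler would replace the AudioIn.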

Looking Outwards 11 – Yugyeong Lee

SYN-Phon is a sound performance based on graphical notation by Candaş Şişman that reflects the role of art in communication and comprehension. The performance is based on intimate findings collected in Budapest by Candaş himself. By communicating music through visual graphics, the project hopes to reach the audience through a sensual, expressive language. The project is inspiring in that it reflects the creator's artistic sensibilities through his representation of a certain type of music, allowing the audience to explore and question how the graphical representation ties in with the sound. While watching the video, viewers can also think through how they would represent the atmosphere of the sound and understand the mood of the music through an algorithmic visual representation.

http://www.csismn.com/SYN-Phon

Project 10 – Yugyeong Lee

sketch

//Yugyeong Lee
//Section B
//yugyeonl@andrew.cmu.edu
//Project-10

var stars = [];		//array of stars
var clouds = [];	//array of clouds
var landscapes = [];	//array of landscapes
var camera;

function preload() {
	//load the camera image that was previously generated with code
	var cameraImage = "https://i.imgur.com/5joEquu.png";
	camera = loadImage(cameraImage);
}

function setup() {
    createCanvas(480, 360);
    //create initial set of stars
    for (var i = 0; i < 100; i++) {
    	var starX = random(width);
    	var starY = random(3*height/4);
    	stars[i] = makeStars(starX, starY);
    }
    //create initial set of clouds
    for (var i = 0; i < 4; i++) {
    	var cloudX = random(width);
    	var cloudY = random(height/2);
    	clouds[i] = makeClouds(cloudX, cloudY);
    }
    //create mountain
    makeLandscape(height-100, 120, 0.0001, .0075, color(20));
    //create ocean
    makeLandscape(height-50, 20, 0.0001, .0005, color(42, 39, 50));
}

function draw() {
	//gradient background
    var from = color(24, 12, 34);
	var to = color(220, 130, 142);
    setGradient(0, width, from, to);
    //stars
	updateAndDisplayStars();
	removeStars();
	addStars();
	//moon
	makeMoon();
    //clouds
	updateAndDisplayClouds();
	removeClouds();
	addClouds();
	//landscape
	moveLandscape();
    //reflection of moon on ocean
    ellipseMode(CENTER);
    fill(243, 229, 202, 90);
    ellipse(3*width/4, height-50, random(50, 55), 4);
    ellipse(3*width/4, height-35, random(35, 40), 4);
    ellipse(3*width/4, height-26, random(25, 30), 4);
    ellipse(3*width/4, height-17, random(10, 15), 4);
    ellipse(3*width/4, height-8, random(35, 40), 5);
	fill(204, 178, 153, 50);
    ellipse(3*width/4, height-50, random(70, 80), 8);
    ellipse(3*width/4, height-35, random(50, 60), 8);
    ellipse(3*width/4, height-26, random(70, 80), 8);
    ellipse(3*width/4, height-17, random(30, 40), 8);
    ellipse(3*width/4, height-8, random(60, 70), 10);
	//camera LCD display
	push();
	translate(65, 153);
	scale(.475, .46);
	var from = color(24, 12, 34);
	var to = color(220, 130, 142);
    setGradient(0, width, from, to);
    //stars
	updateAndDisplayStars();
	removeStars();
	addStars();
	//moon
	makeMoon();
    //clouds
	updateAndDisplayClouds();
	removeClouds();
	addClouds();
	//landscape
	moveLandscape();
    //reflection
    ellipseMode(CENTER);
    fill(243, 229, 202, 90);
    ellipse(3*width/4, height-35, random(50, 55), 6);
    ellipse(3*width/4, height-28, random(35, 40), 4);
    ellipse(3*width/4, height-19, random(25, 30), 4);
    ellipse(3*width/4, height-10, random(10, 15), 4);
	fill(204, 178, 153, 50);
    ellipse(3*width/4, height-35, random(70, 80), 8);
    ellipse(3*width/4, height-28, random(50, 60), 8);
    ellipse(3*width/4, height-19, random(70, 80), 8);
    ellipse(3*width/4, height-10, random(30, 40), 8);
	pop();
	//camera
	image(camera, 0, 0);
	//camera crosshair
	noFill();
	strokeWeight(.25);
	stroke(235, 150);
	rect(75, 163, 200, 140);
	rect(85, 173, 180, 120);
	line(170, 233, 180, 233);
	line(175, 228, 175, 238);
	//battery symbol
	strokeWeight(.5);
	rect(94, 279, 18.25, 6);
	noStroke();
	fill(235, 150);
	for (var i = 0; i < 4; i++) {
		rect(95+i*4.25, 280, 4, 4);
	}
	rect(112.25, 280, 2, 3);
	//REC text
	fill(235);
	textSize(7);
	text("REC", 245, 184);
	fill(225, 100, 0);
	ellipse(238, 182, 7, 7);
	//camera tripod
	fill(30);
	rect(width/2-50, height-26, 100, 20);
	fill(25);
	rect(width/2-75, height-16, 150, 50, 10);
}

function setGradient (y, w, from, to) {
	// top to bottom gradient (background)
    for (var i = y; i <= height; i++) {
      var inter = map(i, y, y+w, 0, 1);
      var c = lerpColor(from, to, inter);
      stroke(c);
      strokeWeight(2);
      line(y, i, y+w, i);
	}
}

function makeMoon() {
	ellipseMode(CENTER);
	var diam = 80;	//diameter of the moon
	for (var i = 0; i < 30; i++) {
		//glowing gradient moonlight with randomized transparency
		var value = random(7, 8);
		var transparency = 50 - value*i;
		fill(243, 229, 202, transparency);
		ellipse(3*width/4, 90, diam+10*i, diam+10*i);
	}
	//the moon
	fill(204, 178, 153);
	ellipse(3*width/4, 90, diam, diam);
}

function makeLandscape(landscapeY, landscapeR, landscapeS, landscapeD, landscapeC) {
	var landscape = {ly: landscapeY,		//locationY
					 range: landscapeR,		//range of how far landscape goes up
					 speed: landscapeS,		//speed of the landscape
					 detail: landscapeD,	//detail (how round/sharp)
					 color: landscapeC,		//color of the landscape
					 draw: drawLandscape}
	landscapes.push(landscape);
}

function drawLandscape() {
	//generating landscape from code provided
	fill(this.color);
	beginShape();
	vertex(0, height);
	for (var i = 0; i < width; i++) {
		var t = (i*this.detail) + (millis()*this.speed);
        var y = map(noise(t), 0,1, this.ly-this.range/2, this.ly+this.range/2);
        vertex(i, y); 
	}
	vertex(width, height);
	endShape(CLOSE);
}

function moveLandscape() {
	//move the landscape
	for (var i = 0; i < landscapes.length; i++) landscapes[i].draw();
}

function updateAndDisplayStars() {
	//update the stars' position & draw them
	for (var i = 0; i < stars.length; i++) {
		stars[i].move();
		stars[i].draw();
	}
}

function makeStars(starX, starY) {
	var star = {x: starX,					//locationX of star
				y: starY,					//locationY of star
				speed: -random(0, .005),	//speed of the star
				move: moveStars,			
				draw: drawStars}
	return star;
}

function drawStars() {
	noStroke();
	//setting transparency at random to have twinkling effect
	var transparency = random(50, 200);
	fill(255, transparency);
	ellipse(this.x, this.y, 1.25, 1.25);
}

function moveStars() {
	//move stars by updating their x position
	this.x += this.speed;
}

function removeStars() {
	var keepStars = []; //array of stars to keep
	for (var i = 0; i < stars.length; i++) {
		if (stars[i].x > 0 && stars[i].x < width) {	//keep stars still on the canvas
			keepStars.push(stars[i]);
		}
	}
	stars = keepStars;	//remember the surviving stars
}

function addStars() {
	//new stars from the right edge of the canvas
	var newStarsProbability = 0.0025;
	//likelihood of new stars
	if (random(0, 1) < newStarsProbability) {
    	var starX = width;
    	var starY = random(3*height/4);
		stars.push(makeStars(starX, starY));
	}
}
//clouds
function updateAndDisplayClouds() {
	//update the clouds' position & draw them
	for (var i = 0; i < clouds.length; i++) {
		clouds[i].move();
		clouds[i].draw();
	}
}

function makeClouds(cloudX, cloudY) {
	var cloud = {x: cloudX,					//locationX of cloud
				y: cloudY,					//locationY of the cloud
				breadth: random(200, 300),	//width of the cloud
				speedC: -random(.3, .5),	//speed of the cloud
				nFloors: round(random(2,6)),//multiplier that determines the height of the cloud
				transparency: random(20, 60),//transparency of the cloud
				move: moveClouds,
				draw: drawClouds}
	return cloud;
}

function drawClouds() {
	var multiplier = 5;	//multiplier that determines the height of the cloud
	var cloudHeight = this.nFloors*multiplier;
	ellipseMode(CORNER);
	noStroke();
	fill(255, this.transparency);
	push();
	translate(this.x, height/2-80);
	ellipse(0, -cloudHeight, this.breadth, cloudHeight/2);
	pop();
	push();
	translate(this.x, height/2-100);
	ellipse(30, -cloudHeight, this.breadth, cloudHeight);
	pop();
}

function moveClouds() {
	//move clouds by updating their x position
	this.x += this.speedC;
}

function removeClouds() {
	var keepClouds = [];	//array of clouds to keep
	for (var i = 0; i < clouds.length; i++) {
		if (clouds[i].x + clouds[i].breadth > 0) {
			keepClouds.push(clouds[i]);
		}
	}
	clouds = keepClouds;	//remember the surviving clouds
}

function addClouds() {
	//new clouds from the right edge of the canvas
	var newCloudsProbability = 0.005;
	//likelihood of new clouds
	if (random(0, 1) < newCloudsProbability) {
    	var cloudX = width;
    	var cloudY = random(height/2);
		clouds.push(makeClouds(cloudX, cloudY));
	}
}

I visualized the generative landscape through the lens of a camera. As if recording the moving landscape in the background, the LCD display shows the night sky with twinkling stars as well as clouds, mountains, and a flowing body of water. I originally created the camera through code, but when I realized I could not make the LCD display transparent that way, I took a screenshot of the camera I had generated, made the display area transparent in Photoshop, and loaded it into my code as an image. I focused on creating depth in this generative landscape through its different layers and wanted to make sure that even objects with subtle motion, such as the moon and the stars, have movement through their blinking and glowing effects.

Looking Outwards 10 – Yugyeong Lee

Toni Dove is one of the pioneers of interactive cinema, combining film, installation, and performance with contemporary narrative trends, often reflecting a feminist take on popular subjects. She has worked with institutions including the Banff Centre for the Arts, ZKM, and the Whitney Museum of American Art to showcase her projects. One of her projects, Lucid Possession, is a live cinema performance using “motion-sensing technologies to perform complex layers of media.” The performance sings and speaks through an avatar that represents “an online alter ego.” The narrative unfolds as Bean, the designer of the virtual avatars, is plagued by ghosts; the story reveals an inner battle between the real and the virtual self. Her interactive work is inspiring in that it combines the real and the virtual, incorporating technology both within the performance and in its narrative to reflect an ongoing popular topic.

website: https://tonidove.com/

Project 09 – Yugyeong Lee

sketch

//Yugyeong Lee
//Section B
//yugyeonl@andrew.cmu.edu
//Project 09

var imgYugy;

function preload() {
    var yugyURL = "https://i.imgur.com/Ghau5tT.jpg"; 
    //load image using the URL
    imgYugy = loadImage(yugyURL);
}

function setup() {
    createCanvas(480, 480);
    background(255);

    imgYugy.loadPixels();
    //how fast curves get drawn on canvas
    frameRate(750);
}
 
function draw() {
    //calling created functions
    circleYugy();
    curveYugy();
}

//circular pixels following position of mouse
function circleYugy() {
    var startX = mouseX;
    var startY = mouseY;
    var limitX = constrain(floor(startX), 0, width - 1);
    var limitY = constrain(floor(startY), 0, height - 1);
    var pixelColor = imgYugy.get(limitX, limitY);	//sample the image color at this point
    var diam = random(3, 10);

    noFill();
    stroke(pixelColor);
    ellipse(startX, startY, diam, diam);
}

//short diagonal curves at random positions
function curveYugy() {
    var startX = random(0, width);
    var startY = random(0, height);
    var limitX = constrain(floor(startX), 0, width-1);
    var limitY = constrain(floor(startY), 0, height-1);
    var pixelColor = imgYugy.get(limitX, limitY);	//sample the image color at this point
    var diagonalLength = random(5, 25);
    var thickness = random(.5, 3);
    var curveScale = random(.1, .6);

    stroke(pixelColor);
    strokeWeight(thickness);
    noFill();
    beginShape();
    curveVertex(startX, startY);
    curveVertex(startX, startY);
    curveVertex(startX+curveScale*diagonalLength, startY+.6*diagonalLength);
    curveVertex(startX+diagonalLength, startY+diagonalLength);
    curveVertex(startX+diagonalLength, startY+diagonalLength);
    endShape();
}

There are two ways the portrait gets drawn onto the canvas: random curves added at each frame, and circles that follow the position of the mouse, both sampling the pixel color of the image at that location. I wanted to create a paint-brushed effect in the end product while incorporating another geometry to disturb what could otherwise be a smooth surface. Below are screenshots of the stages with just the curves and with the circles added.

[Screenshots: original picture · first stage (just curves) · second stage · first stage (with circles) · second stage · third stage]

Looking Outwards 09 – Yugyeong Lee

‘A Musical Wall where Little People Live’ (2017) by teamLab is an interactive projection on a wall that reacts to physical objects placed on it. I agree that this project is “effective in its simplicity,” which allows children to play with and easily interact with its game-like aspects. In addition, the project evokes a “sense of delight” not only for children but also for adults, as the projected plot and characters create a genuine atmosphere for any age to enjoy. Exploring the relationship between the virtual and physical worlds, the project blends the two in a generative and creative way in which users wander and explore to find patterns. As interactive installations “allow for unlimited possibilities of expression and transformation,” the project definitely opens up possibilities for the field of art. As Fon assessed, this technology “helps us rethink and expand the field of art,” and its application in architecture could also be an interesting adaptation, where interactive designs could be incorporated to explore different experiences of a single space.

link: https://courses.ideate.cmu.edu/15-104/f2017/2017/09/01/keuchuka-looking-outwards-01/


Looking Outwards 08 – Yugyeong Lee

James George is a media artist and programmer who creates films, interactive installations, and software products advancing the art of the moving image. He created a new medium for cinematic expression by incorporating code to bring volume to his films. In his presentation at Eyebeam Art + Technology, he talks about photography. In a time when “ten percent of all photos were taken in the last twelve months,” he explores what that means for photographers in terms of their contribution to the database. This leads to a fascination with humans in digital space. One of the projects that reflects that interest is Depth Editor Debug, which depicts fragments of candid photographs placed into a three-dimensional space. By combining data from a video game controller with custom software, the project captures unique data from a depth-sensing camera and visualizes people existing in their natural way within a virtual environment. The project has since been developed to create films with the same concept and to allow users to visualize the data online. James George is admirable in the way he visualizes and transforms a typical photograph into an interactive form represented in physical space, reflecting his inquiry into humans in digital space. He also presents in a step-by-step manner that engages viewers with the topic, using quotes and examples that reflect the purpose of these projects.

lecture: https://vimeo.com/channels/eyeo2015/134973504

website: http://jamesgeorge.org/

photographs of Depth Editor Debug project

Project 07 – Yugyeong Lee


sketch

//Yugyeong Lee
//Section B
//yugyeonl@andrew.cmu.edu
//Project-07

var nPointsA = 600;		//number of points for the hypotrochoid
var nPointsB = 200;		//number of points for the ranunculoid
var nPointsC = 800;		//number of points for the ellipse
var x;
var y;
var constrainX;

function setup() {
    createCanvas(480, 480);
}

function draw() {
	//ghosted background changing color based on mouseX
	var constrainX = constrain(mouseX, 0, width); //constraining mouseX in the page
	var r = map(constrainX, 0, width, 100, 39);
	var g = map(constrainX, 0, width, 43, 15);
	var b = map(constrainX, 0, width, 107, 54);
    background(r, g, b, 40);
    //Hypotrochoid & Ellipse
    push();
    translate(width/2, height/2);
    drawHypotrochoid();
    drawEllipse();
    pop();
    //Ranunculoid (only appears if mouseX is in the left three quarters of the page)
    if (constrainX < 3*width/4) {
   		push();
    	translate(width/2, height/2);
    	//mapping the angle of rotation based on mouseX
    	var angle = map(constrainX, 0, width, 0, 4 * TWO_PI);
		rotate(angle);
    	drawRanunculoid();
    	pop();
    }
}

function drawHypotrochoid() {
    constrainX = constrain(mouseX, 0, width); //constraining mouseX in the page
	var n = map(constrainX, 0, width, 0, .5);
    var a = 225;
    var b = n*a;
    var h = constrain(mouseY/2, 0, b);

    noFill();
    //change strokeWeight based on mouseX
    var s = map(constrainX, 0, width, .1, 2);
    //change stroke color based on mouseX
    var c = map(constrainX, 0, width, 255, 100);
    stroke(c);
    strokeWeight(s);
	beginShape()
		for (var i = 0; i < nPointsA; i++) {
			var t = map(i, 0, nPointsA, 0, 6*TWO_PI);
			x = (a - b)*cos(t) + h*cos((a - b)/b*t);
			y = (a-b)*sin(t) + h*sin((a-b)/b*t);
			vertex(x, y);
		}
	endShape(CLOSE);
 }

 function drawRanunculoid() {
    constrainX = constrain(mouseX, 0, width); //constraining mouseX in the page
    var a = 30;
    noStroke();
    //change fill opacity based on mouseX
    var opacity = map(constrainX, 0, width, 10, 50);
    fill(255, 255, 255, opacity);
	beginShape()
		for (var i = 0; i < nPointsB; i++) {
			var t = map(i, 0, nPointsB, 0, TWO_PI);
			x = a*(6*cos(t)-cos(6*t));
			y = a*(6*sin(t)-sin(6*t));
        	ellipse(x, y, 5, 5); //ellipse at each point
		}
	endShape(CLOSE);
 }

function drawEllipse() {
    constrainX = constrain(mouseX, 0, width); //constraining mouseX in the page
    var a = 270;
    var b = 270;
    noFill();
	beginShape()
		for (var i = 0; i < nPointsC; i++) {
			var t = map(i, 0, nPointsC, 0, 6*TWO_PI);
			x = a*cos(t);
			y = b*sin(t);
			//ellipses at random offsets around x and y
        	ellipse(x+random(-50, 50), y+random(-50, 50), 3, 3);
		}
	endShape(CLOSE);
 }

I used three different curves for this interactive design: a hypotrochoid, a ranunculoid, and an ellipse. The final product is interactive in that not only does the design react to the mouse's Y position, but the curves, the angle of rotation, the stroke weight, stroke color, stroke opacity, and background color all react to the mouse's X position as well. Because of the circular movement of the other two curves, I wanted to create a “night-sky-with-blinking-stars” effect by drawing an ellipse whose points are randomly offset within a limited range as white blinking circles. The color palette was also chosen to reflect the night-sky atmosphere.

how the hypotrochoid & ranunculoid work
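For reference, and simply restating the implementation in drawHypotrochoid() and drawRanunculoid() above (rather than the canonical textbook forms), the two curves are traced by

$$x(t) = (a-b)\cos t + h\cos\!\left(\tfrac{a-b}{b}\,t\right), \qquad y(t) = (a-b)\sin t + h\sin\!\left(\tfrac{a-b}{b}\,t\right)$$

for the hypotrochoid and

$$x(t) = a\,(6\cos t - \cos 6t), \qquad y(t) = a\,(6\sin t - \sin 6t)$$

for the ranunculoid, where a is the fixed radius set in the code, b = n*a is scaled by mouseX, and h is constrained by mouseY.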