Python: instead of running the code, it just displays it in text form

Eyyo

Guest
I tried it like this, but instead of running the code it just displays it in text form. Here's my code:


Code:
from flask import Flask

app = Flask(__name__)

@app.route('/detection-system')
def display_code():
    code = """
    import cv2
import numpy as np
import time
import winsound

modelConfiguration = "yolov3_custom.cfg"
modelWeights = "yolov3_custom_last.weights"
net = cv2.dnn.readNet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

classes = []
with open("obj.names", "r") as f:
    classes = f.read().splitlines()

cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
font = cv2.FONT_HERSHEY_PLAIN
colors = np.random.uniform(0, 255, size=(100, 3))

# create an empty list to store labels and confidence values for each iteration
detections = []

# Setting up timer
start_time = time.time()
# Time interval to save results
interval = 1

while True:
    _, img = cap.read()
    height, width, _ = img.shape

    blob = cv2.dnn.blobFromImage(img, 1 / 255, (416, 416), (0, 0, 0), swapRB=True, crop=False)
    net.setInput(blob)
    output_layers_names = net.getUnconnectedOutLayersNames()
    layerOutputs = net.forward(output_layers_names)

    boxes = []
    confidences = []
    class_ids = []

    for output in layerOutputs:
        for detection in output:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.2:
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)

                x = int(center_x - w / 2)
                y = int(center_y - h / 2)

                boxes.append([x, y, w, h])
                confidences.append((float(confidence)))
                class_ids.append(class_id)

    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.2, 0.4)

    if len(indexes) > 0:
        for i in indexes.flatten():
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            new_confidence = str(round(confidences[i], 2))
            color = colors[i]
            cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
            cv2.putText(img, label + " " + new_confidence, (x, y + 20), font, 1, (255, 255, 255), 2)

            # Alert sound and save function
            if label == "No_Goggles" and float(new_confidence) >= 0.8:
                winsound.PlaySound("test-sound", winsound.SND_FILENAME)

            if label == "No_Gloves" and float(new_confidence) >= 0.8:
                winsound.PlaySound("test-sound", winsound.SND_FILENAME)

            if label == "No_Mask" and float(new_confidence) >= 0.8:
                winsound.PlaySound("test-sound", winsound.SND_FILENAME)

            if label == "No_Labcoat" and float(new_confidence) >= 0.8:
                winsound.PlaySound("test-sound", winsound.SND_FILENAME)

            detections.append((label, new_confidence))

            # detected will save to localhost
            save_path = 'detected.jpg'
            cv2.imwrite(save_path, img)
            cv2.imshow('Image', img)

            # Update confidence and label values every minute
            if time.time() - start_time >= interval:
                start_time = time.time()
                with open('results.txt', 'a') as f:
                    for detection in detections:
                        label, confidence = detection
                        f.write(label + ' ' + confidence + '\n')
                    f.write('\n')

    key = cv2.waitKey(1)
    if key == 27:
        break

cap.release()
cv2.destroyAllWindows()
    """
    return '<pre>' + code + '</pre>'

if __name__ == '__main__':
    app.run(debug=True)
 
To display code in a formatted manner, you can use the <pre> and <code> tags in HTML. Replace the display_code() function with the following:

Code:
@app.route('/detection-system')
def display_code():
    with open(__file__, 'r') as f:
        code = f.read()
    return "<pre><code>" + code + "</code></pre>"

This will display the code in a formatted, monospaced block (note that <pre> and <code> on their own do not add line numbers). __file__ is a module attribute that holds the path of the current file, so this route shows the source of the Flask script itself, including the embedded detection string.
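One thing to watch out for: if the source contains characters such as <, > or &, the browser will try to interpret them as HTML. A minimal sketch of the same route with escaping added, using the standard-library html module:

Code:
import html

from flask import Flask

app = Flask(__name__)

@app.route('/detection-system')
def display_code():
    # Read this script's own source and escape it so that <, > and &
    # are shown literally instead of being parsed as HTML tags.
    with open(__file__, 'r') as f:
        code = f.read()
    return '<pre><code>' + html.escape(code) + '</code></pre>'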
 
Can you instead modify the code I write above?
 
Yes, here is your code with the indentation inside the triple-quoted string cleaned up so it displays consistently:

Code:
from flask import Flask

app = Flask(__name__)

@app.route('/detection-system')
def display_code():
    code = """
    import cv2
    import numpy as np
    import time
    import winsound

    modelConfiguration = "yolov3_custom.cfg"
    modelWeights = "yolov3_custom_last.weights"
    net = cv2.dnn.readNet(modelConfiguration, modelWeights)
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

    classes = []
    with open("obj.names", "r") as f:
        classes = f.read().splitlines()

    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    font = cv2.FONT_HERSHEY_PLAIN
    colors = np.random.uniform(0, 255, size=(100, 3))

    # create an empty list to store labels and confidence values for each iteration
    detections = []

    # Setting up timer
    start_time = time.time()
    # Time interval to save results
    interval = 1

    while True:
        _, img = cap.read()
        height, width, _ = img.shape

        blob = cv2.dnn.blobFromImage(img, 1 / 255, (416, 416), (0, 0, 0), swapRB=True, crop=False)
        net.setInput(blob)
        output_layers_names = net.getUnconnectedOutLayersNames()
        layerOutputs = net.forward(output_layers_names)

        boxes = []
        confidences = []
        class_ids = []

        for output in layerOutputs:
            for detection in output:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > 0.2:
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)

                    x = int(center_x - w / 2)
                    y = int(center_y - h / 2)

                    boxes.append([x, y, w, h])
                    confidences.append((float(confidence)))
                    class_ids.append(class_id)

        indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.2, 0.4)

        if len(indexes) > 0:
            for i in indexes.flatten():
                x, y, w, h = boxes[i]
                label = str(classes[class_ids[i]])
                new_confidence = str(round(confidences[i], 2))
                color = colors[i]
                cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
                cv2.putText(img, label + " " + new_confidence, (x, y + 20), font, 1, (255, 255, 255), 2)

                # Alert sound and save function
                if label == "No_Goggles" and float(new_confidence) >= 0.8:
                    winsound.PlaySound("test-sound", winsound.SND_FILENAME)

                if label == "No_Gloves" and float(new_confidence) >= 0.8:
                    winsound.PlaySound("test-sound", winsound.SND_FILENAME)

                if label == "No_Mask" and float(new_confidence) >= 0.8:
                    winsound.PlaySound("test-sound", winsound.SND_FILENAME)

                if label == "No_Labcoat" and float(new_confidence) >= 0.8:
                    winsound.PlaySound("test-sound", winsound.SND_FILENAME)

                detections.append((label, new_confidence))

                # detected will save to localhost
                save_path = 'detected.jpg'
                cv2.imwrite(save_path, img)
                cv2.imshow('Image', img)

                # Update confidence and label values every minute
                if time.time() - start_time >= interval:
                    start_time = time.time()
                    with open('results.txt', 'a') as f:
                        for detection in detections:
                            label, confidence = detection
                            f.write(label + ' ' + confidence + '\n')
                        f.write('\n')

        key = cv2.waitKey(1)
        if key == 27:
            break

    cap.release()
    cv2.destroyAllWindows()
    """
    return '<pre>' + code + '</pre>'

if __name__ == '__main__':
    app.run(debug=True)

This will display the code as a formatted block when you navigate to the /detection-system route in your Flask application. Keep in mind, though, that the detection code lives inside a triple-quoted string: Flask only returns it as text, it never executes it.
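If what you actually want at that URL is the detection output rather than the source text, one simple option is to keep running the detection script separately and have Flask serve the detected.jpg snapshot that the loop keeps overwriting. This is only a minimal sketch; the route name is a placeholder and it assumes the image is written into the Flask app's working directory:

Code:
from flask import Flask, send_file

app = Flask(__name__)

@app.route('/latest-detection')
def latest_detection():
    # Serve the most recent annotated frame written by the detection loop.
    # Assumes the detection script runs as a separate process and keeps
    # overwriting detected.jpg next to this Flask app.
    return send_file('detected.jpg', mimetype='image/jpeg')

if __name__ == '__main__':
    app.run(debug=True)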
 
Do I need to get the frames from the detection loop's camera output in order to display them on the web page?
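Yes: the browser can only show what the Flask app sends it, so the annotated frames have to be pushed to the page by the server. A common pattern is MJPEG streaming: run the capture/detection loop inside a generator, encode each annotated frame as JPEG, and yield it in a multipart response. A minimal sketch (the detection itself is omitted for brevity, and the route names are just placeholders):

Code:
import cv2
from flask import Flask, Response

app = Flask(__name__)

def generate_frames():
    # Capture frames and run the YOLO detection on each one here
    # (the same code as in the while-loop above), then stream the
    # annotated frame to the browser as a JPEG.
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    try:
        while True:
            ok, frame = cap.read()
            if not ok:
                break
            # ... net.setInput(blob), net.forward(), draw boxes/labels on frame ...
            ok, buffer = cv2.imencode('.jpg', frame)
            if not ok:
                continue
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')
    finally:
        cap.release()

@app.route('/video-feed')
def video_feed():
    return Response(generate_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/detection-system')
def detection_page():
    # The page embeds the stream as an ordinary <img> element.
    return '<h1>Detection system</h1><img src="/video-feed">'

if __name__ == '__main__':
    app.run(debug=True)

With this approach cv2.imshow and the blocking cv2.waitKey loop are no longer needed on the server side; the browser simply keeps receiving new JPEG frames from the /video-feed route.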
 