FtcRobotController v8.2
This commit is contained in:
@ -8,7 +8,7 @@ apply plugin: 'com.android.library'
|
||||
android {
|
||||
|
||||
defaultConfig {
|
||||
minSdkVersion 23
|
||||
minSdkVersion 24
|
||||
//noinspection ExpiredTargetSdkVersion
|
||||
targetSdkVersion 28
|
||||
buildConfigField "String", "APP_BUILD_TIME", '"' + (new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ", Locale.ROOT).format(new Date())) + '"'
|
||||
@ -17,8 +17,8 @@ android {
|
||||
compileSdkVersion 29
|
||||
|
||||
compileOptions {
|
||||
sourceCompatibility JavaVersion.VERSION_1_7
|
||||
targetCompatibility JavaVersion.VERSION_1_7
|
||||
sourceCompatibility JavaVersion.VERSION_1_8
|
||||
targetCompatibility JavaVersion.VERSION_1_8
|
||||
}
|
||||
namespace = 'com.qualcomm.ftcrobotcontroller'
|
||||
}
|
||||
|
@ -1,8 +1,8 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
|
||||
xmlns:tools="http://schemas.android.com/tools"
|
||||
android:versionCode="49"
|
||||
android:versionName="8.1.1">
|
||||
android:versionCode="50"
|
||||
android:versionName="8.2">
|
||||
|
||||
<uses-permission android:name="android.permission.RECEIVE_BOOT_COMPLETED" />
|
||||
|
||||
|
@ -0,0 +1,192 @@
|
||||
/* Copyright (c) 2023 FIRST. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted (subject to the limitations in the disclaimer below) provided that
|
||||
* the following conditions are met:
|
||||
*
|
||||
* Redistributions of source code must retain the above copyright notice, this list
|
||||
* of conditions and the following disclaimer.
|
||||
*
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
* list of conditions and the following disclaimer in the documentation and/or
|
||||
* other materials provided with the distribution.
|
||||
*
|
||||
* Neither the name of FIRST nor the names of its contributors may be used to endorse or
|
||||
* promote products derived from this software without specific prior written permission.
|
||||
*
|
||||
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS
|
||||
* LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package org.firstinspires.ftc.robotcontroller.external.samples;
|
||||
|
||||
import com.qualcomm.robotcore.eventloop.opmode.Disabled;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
|
||||
import java.util.List;
|
||||
import android.util.Size;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.BuiltinCameraDirection;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.WebcamName;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.DistanceUnit;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.AngleUnit;
|
||||
import org.firstinspires.ftc.vision.VisionPortal;
|
||||
import org.firstinspires.ftc.vision.apriltag.AprilTagDetection;
|
||||
import org.firstinspires.ftc.vision.apriltag.AprilTagProcessor;
|
||||
import org.firstinspires.ftc.vision.apriltag.AprilTagGameDatabase;
|
||||
|
||||
/**
|
||||
* This 2023-2024 OpMode illustrates the basics of AprilTag recognition and pose estimation,
|
||||
* including Java Builder structures for specifying Vision parameters.
|
||||
*
|
||||
* Use Android Studio to Copy this Class, and Paste it into your team's code folder with a new name.
|
||||
* Remove or comment out the @Disabled line to add this OpMode to the Driver Station OpMode list.
|
||||
*/
|
||||
@TeleOp(name = "Concept: AprilTag", group = "Concept")
|
||||
@Disabled
|
||||
public class ConceptAprilTag extends LinearOpMode {

    private static final boolean USE_WEBCAM = true;  // true for webcam, false for phone camera

    /**
     * {@link #aprilTag} is the variable to store our instance of the AprilTag processor.
     */
    private AprilTagProcessor aprilTag;

    /**
     * {@link #visionPortal} is the variable to store our instance of the vision portal.
     */
    private VisionPortal visionPortal;

    /**
     * Main OpMode body: initializes the AprilTag processor and vision portal, then loops
     * while active, publishing per-tag telemetry and letting the driver pause/resume
     * camera streaming (dpad down/up) to save CPU.
     */
    @Override
    public void runOpMode() {

        initAprilTag();

        // Wait for the DS start button to be touched.
        telemetry.addData("DS preview on/off", "3 dots, Camera Stream");
        telemetry.addData(">", "Touch Play to start OpMode");
        telemetry.update();
        waitForStart();

        if (opModeIsActive()) {
            while (opModeIsActive()) {

                telemetryAprilTag();

                // Push telemetry to the Driver Station.
                telemetry.update();

                // Save CPU resources; can resume streaming when needed.
                if (gamepad1.dpad_down) {
                    visionPortal.stopStreaming();
                } else if (gamepad1.dpad_up) {
                    visionPortal.resumeStreaming();
                }

                // Share the CPU.
                sleep(20);
            }
        }

        // Save more CPU resources when camera is no longer needed.
        visionPortal.close();

    }   // end method runOpMode()

    /**
     * Initialize the AprilTag processor.
     * Builds {@link #aprilTag} (all options left at SDK defaults; see the commented-out
     * Builder calls for the available knobs) and then builds {@link #visionPortal} on
     * either the named webcam or the phone's back camera, per {@link #USE_WEBCAM}.
     */
    private void initAprilTag() {

        // Create the AprilTag processor.
        aprilTag = new AprilTagProcessor.Builder()
            //.setDrawAxes(false)
            //.setDrawCubeProjection(false)
            //.setDrawTagOutline(true)
            //.setTagFamily(AprilTagProcessor.TagFamily.TAG_36h11)
            //.setTagLibrary(AprilTagGameDatabase.getCenterStageTagLibrary())
            //.setOutputUnits(DistanceUnit.INCH, AngleUnit.DEGREES)

            // == CAMERA CALIBRATION ==
            // If you do not manually specify calibration parameters, the SDK will attempt
            // to load a predefined calibration for your camera.
            //.setLensIntrinsics(578.272, 578.272, 402.145, 221.506)

            // ... these parameters are fx, fy, cx, cy.

            .build();

        // Create the vision portal by using a builder.
        VisionPortal.Builder builder = new VisionPortal.Builder();

        // Set the camera (webcam vs. built-in RC phone camera).
        if (USE_WEBCAM) {
            builder.setCamera(hardwareMap.get(WebcamName.class, "Webcam 1"));
        } else {
            builder.setCamera(BuiltinCameraDirection.BACK);
        }

        // Choose a camera resolution. Not all cameras support all resolutions.
        //builder.setCameraResolution(new Size(640, 480));

        // Enable the RC preview (LiveView). Set "false" to omit camera monitoring.
        //builder.enableCameraMonitoring(true);

        // Set the stream format; MJPEG uses less bandwidth than default YUY2.
        //builder.setStreamFormat(VisionPortal.StreamFormat.YUY2);

        // Choose whether or not LiveView stops if no processors are enabled.
        // If set "true", monitor shows solid orange screen if no processors enabled.
        // If set "false", monitor shows camera view without annotations.
        //builder.setAutoStopLiveView(false);

        // Set and enable the processor.
        builder.addProcessor(aprilTag);

        // Build the Vision Portal, using the above settings.
        visionPortal = builder.build();

        // Disable or re-enable the aprilTag processor at any time.
        //visionPortal.setProcessorEnabled(aprilTag, true);

    }   // end method initAprilTag()


    /**
     * Function to add telemetry about AprilTag detections.
     * For tags with known metadata it reports pose (XYZ / PRY / RBE); unknown tags
     * report only their image-plane center.
     */
    private void telemetryAprilTag() {

        List<AprilTagDetection> currentDetections = aprilTag.getDetections();
        telemetry.addData("# AprilTags Detected", currentDetections.size());

        // Step through the list of detections and display info for each one.
        // NOTE(review): String.format here uses the device's default Locale, so decimal
        // formatting could vary by locale — consider Locale.US; confirm before changing.
        for (AprilTagDetection detection : currentDetections) {
            if (detection.metadata != null) {
                telemetry.addLine(String.format("\n==== (ID %d) %s", detection.id, detection.metadata.name));
                telemetry.addLine(String.format("XYZ %6.1f %6.1f %6.1f (inch)", detection.ftcPose.x, detection.ftcPose.y, detection.ftcPose.z));
                telemetry.addLine(String.format("PRY %6.1f %6.1f %6.1f (deg)", detection.ftcPose.pitch, detection.ftcPose.roll, detection.ftcPose.yaw));
                telemetry.addLine(String.format("RBE %6.1f %6.1f %6.1f (inch, deg, deg)", detection.ftcPose.range, detection.ftcPose.bearing, detection.ftcPose.elevation));
            } else {
                telemetry.addLine(String.format("\n==== (ID %d) Unknown", detection.id));
                telemetry.addLine(String.format("Center %6.0f %6.0f (pixels)", detection.center.x, detection.center.y));
            }
        }   // end for() loop

        // Add "key" information to telemetry
        telemetry.addLine("\nkey:\nXYZ = X (Right), Y (Forward), Z (Up) dist.");
        telemetry.addLine("PRY = Pitch, Roll & Yaw (XYZ Rotation)");
        telemetry.addLine("RBE = Range, Bearing & Elevation");

    }   // end method telemetryAprilTag()

}   // end class
|
@ -0,0 +1,148 @@
|
||||
/* Copyright (c) 2023 FIRST. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted (subject to the limitations in the disclaimer below) provided that
|
||||
* the following conditions are met:
|
||||
*
|
||||
* Redistributions of source code must retain the above copyright notice, this list
|
||||
* of conditions and the following disclaimer.
|
||||
*
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
* list of conditions and the following disclaimer in the documentation and/or
|
||||
* other materials provided with the distribution.
|
||||
*
|
||||
* Neither the name of FIRST nor the names of its contributors may be used to endorse or
|
||||
* promote products derived from this software without specific prior written permission.
|
||||
*
|
||||
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS
|
||||
* LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package org.firstinspires.ftc.robotcontroller.external.samples;
|
||||
|
||||
import com.qualcomm.robotcore.eventloop.opmode.Disabled;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
|
||||
import java.util.List;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.BuiltinCameraDirection;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.WebcamName;
|
||||
import org.firstinspires.ftc.vision.VisionPortal;
|
||||
import org.firstinspires.ftc.vision.apriltag.AprilTagDetection;
|
||||
import org.firstinspires.ftc.vision.apriltag.AprilTagProcessor;
|
||||
|
||||
/**
|
||||
* This 2023-2024 OpMode illustrates the basics of AprilTag recognition and pose estimation, using
|
||||
* the easy way.
|
||||
*
|
||||
* Use Android Studio to Copy this Class, and Paste it into your team's code folder with a new name.
|
||||
* Remove or comment out the @Disabled line to add this OpMode to the Driver Station OpMode list.
|
||||
*/
|
||||
@TeleOp(name = "Concept: AprilTag Easy", group = "Concept")
|
||||
@Disabled
|
||||
public class ConceptAprilTagEasy extends LinearOpMode {

    private static final boolean USE_WEBCAM = true;  // true for webcam, false for phone camera

    /**
     * {@link #aprilTag} is the variable to store our instance of the AprilTag processor.
     */
    private AprilTagProcessor aprilTag;

    /**
     * {@link #visionPortal} is the variable to store our instance of the vision portal.
     */
    private VisionPortal visionPortal;

    /**
     * Main OpMode body: creates the AprilTag processor and vision portal via the
     * "easy" (all-defaults) factory methods, then loops while active, publishing
     * per-tag telemetry and letting the driver pause/resume camera streaming
     * (dpad down/up) to save CPU.
     */
    @Override
    public void runOpMode() {

        initAprilTag();

        // Wait for the DS start button to be touched.
        telemetry.addData("DS preview on/off", "3 dots, Camera Stream");
        telemetry.addData(">", "Touch Play to start OpMode");
        telemetry.update();
        waitForStart();

        if (opModeIsActive()) {
            while (opModeIsActive()) {

                telemetryAprilTag();

                // Push telemetry to the Driver Station.
                telemetry.update();

                // Save CPU resources; can resume streaming when needed.
                if (gamepad1.dpad_down) {
                    visionPortal.stopStreaming();
                } else if (gamepad1.dpad_up) {
                    visionPortal.resumeStreaming();
                }

                // Share the CPU.
                sleep(20);
            }
        }

        // Save more CPU resources when camera is no longer needed.
        visionPortal.close();

    }   // end method runOpMode()

    /**
     * Initialize the AprilTag processor.
     * Uses easyCreateWithDefaults() for both the processor and the portal; camera
     * source is the named webcam or the phone's back camera, per {@link #USE_WEBCAM}.
     */
    private void initAprilTag() {

        // Create the AprilTag processor the easy way.
        aprilTag = AprilTagProcessor.easyCreateWithDefaults();

        // Create the vision portal the easy way.
        if (USE_WEBCAM) {
            visionPortal = VisionPortal.easyCreateWithDefaults(
                hardwareMap.get(WebcamName.class, "Webcam 1"), aprilTag);
        } else {
            visionPortal = VisionPortal.easyCreateWithDefaults(
                BuiltinCameraDirection.BACK, aprilTag);
        }

    }   // end method initAprilTag()

    /**
     * Function to add telemetry about AprilTag detections.
     * For tags with known metadata it reports pose (XYZ / PRY / RBE); unknown tags
     * report only their image-plane center.
     */
    private void telemetryAprilTag() {

        List<AprilTagDetection> currentDetections = aprilTag.getDetections();
        telemetry.addData("# AprilTags Detected", currentDetections.size());

        // Step through the list of detections and display info for each one.
        // NOTE(review): String.format here uses the device's default Locale, so decimal
        // formatting could vary by locale — consider Locale.US; confirm before changing.
        for (AprilTagDetection detection : currentDetections) {
            if (detection.metadata != null) {
                telemetry.addLine(String.format("\n==== (ID %d) %s", detection.id, detection.metadata.name));
                telemetry.addLine(String.format("XYZ %6.1f %6.1f %6.1f (inch)", detection.ftcPose.x, detection.ftcPose.y, detection.ftcPose.z));
                telemetry.addLine(String.format("PRY %6.1f %6.1f %6.1f (deg)", detection.ftcPose.pitch, detection.ftcPose.roll, detection.ftcPose.yaw));
                telemetry.addLine(String.format("RBE %6.1f %6.1f %6.1f (inch, deg, deg)", detection.ftcPose.range, detection.ftcPose.bearing, detection.ftcPose.elevation));
            } else {
                telemetry.addLine(String.format("\n==== (ID %d) Unknown", detection.id));
                telemetry.addLine(String.format("Center %6.0f %6.0f (pixels)", detection.center.x, detection.center.y));
            }
        }   // end for() loop

        // Add "key" information to telemetry
        telemetry.addLine("\nkey:\nXYZ = X (Right), Y (Forward), Z (Up) dist.");
        telemetry.addLine("PRY = Pitch, Roll & Yaw (XYZ Rotation)");
        telemetry.addLine("RBE = Range, Bearing & Elevation");

    }   // end method telemetryAprilTag()

}   // end class
|
@ -0,0 +1,248 @@
|
||||
/* Copyright (c) 2023 FIRST. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted (subject to the limitations in the disclaimer below) provided that
|
||||
* the following conditions are met:
|
||||
*
|
||||
* Redistributions of source code must retain the above copyright notice, this list
|
||||
* of conditions and the following disclaimer.
|
||||
*
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
* list of conditions and the following disclaimer in the documentation and/or
|
||||
* other materials provided with the distribution.
|
||||
*
|
||||
* Neither the name of FIRST nor the names of its contributors may be used to endorse or
|
||||
* promote products derived from this software without specific prior written permission.
|
||||
*
|
||||
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS
|
||||
* LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package org.firstinspires.ftc.robotcontroller.external.samples;
|
||||
|
||||
import com.qualcomm.robotcore.eventloop.opmode.Disabled;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
|
||||
import com.qualcomm.robotcore.util.Range;
|
||||
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.WebcamName;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.controls.ExposureControl;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.controls.GainControl;
|
||||
|
||||
import org.firstinspires.ftc.vision.VisionPortal;
|
||||
import org.firstinspires.ftc.vision.apriltag.AprilTagDetection;
|
||||
import org.firstinspires.ftc.vision.apriltag.AprilTagProcessor;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* This OpMode determines the best Exposure for minimizing image motion-blur on a Webcam
|
||||
* Note that it is not possible to control the exposure for a Phone Camera, so if you are using a Phone for the Robot Controller
|
||||
* this OpMode/Feature only applies to an externally connected Webcam
|
||||
*
|
||||
* The goal is to determine the smallest (shortest) Exposure value that still provides reliable Tag Detection.
|
||||
* Starting with the minimum Exposure and maximum Gain, the exposure is slowly increased until the Tag is
|
||||
* detected reliably from the likely operational distance.
|
||||
*
|
||||
*
|
||||
* The best way to run this optimization is to view the camera preview screen while changing the exposure and gains.
|
||||
*
|
||||
* To do this, you need to view the RobotController screen directly (not from Driver Station)
|
||||
* This can be done directly from a RC phone screen (if you are using an external Webcam), but for a Control Hub you must either plug an
|
||||
* HDMI monitor into the Control Hub HDMI port, or use an external viewer program like ScrCpy (https://scrcpy.org/)
|
||||
*
|
||||
* Use Android Studio to Copy this Class, and Paste it into the TeamCode/src/main/java/org/firstinspires/ftc/teamcode folder.
|
||||
* Remove or comment out the @Disabled line to add this OpMode to the Driver Station OpMode list.
|
||||
*/
|
||||
|
||||
@TeleOp(name="Optimize AprilTag Exposure", group = "Concept")
|
||||
@Disabled
|
||||
public class ConceptAprilTagOptimizeExposure extends LinearOpMode
{
    private VisionPortal visionPortal = null;        // Used to manage the video source.
    private AprilTagProcessor aprilTag;              // Used for managing the AprilTag detection process.

    // Current and camera-reported min/max exposure (ms) and gain settings.
    private int myExposure ;
    private int minExposure ;
    private int maxExposure ;
    private int myGain ;
    private int minGain ;
    private int maxGain ;

    // Edge-detection state for gamepad buttons: "this*" is the current sample,
    // "last*" the previous loop's sample; a rising edge (this && !last) is one click.
    boolean thisExpUp = false;
    boolean thisExpDn = false;
    boolean thisGainUp = false;
    boolean thisGainDn = false;

    boolean lastExpUp = false;
    boolean lastExpDn = false;
    boolean lastGainUp = false;
    boolean lastGainDn = false;

    /**
     * Main OpMode body: initializes AprilTag detection, queries the camera's
     * exposure/gain ranges, starts at low exposure + max gain, then loops while
     * active letting the driver step exposure (left bumper/trigger) and gain
     * (right bumper/trigger) while watching detection reliability.
     */
    @Override public void runOpMode()
    {
        // Initialize the Apriltag Detection process
        initAprilTag();

        // Establish Min and Max Gains and Exposure.  Then set a low exposure with high gain
        getCameraSetting();
        // NOTE(review): Math.min(5, minExposure) yields a value BELOW the camera's
        // reported minimum whenever minExposure > 5; Math.max (or Range.clip into
        // [minExposure, maxExposure]) may be the intent — confirm against camera behavior.
        myExposure = Math.min(5, minExposure);
        myGain = maxGain;
        setManualExposure(myExposure, myGain);

        // Wait for the match to begin.
        telemetry.addData("Camera preview on/off", "3 dots, Camera Stream");
        telemetry.addData(">", "Touch Play to start OpMode");
        telemetry.update();
        waitForStart();

        while (opModeIsActive())
        {
            telemetry.addLine("Find lowest Exposure that gives reliable detection.");
            telemetry.addLine("Use Left bump/trig to adjust Exposure.");
            telemetry.addLine("Use Right bump/trig to adjust Gain.\n");

            // Display how many Tags Detected
            List<AprilTagDetection> currentDetections = aprilTag.getDetections();
            int numTags = currentDetections.size();
            if (numTags > 0 )
                telemetry.addData("Tag", "####### %d Detected ######", currentDetections.size());
            else
                telemetry.addData("Tag", "----------- none - ----------");

            telemetry.addData("Exposure","%d (%d - %d)", myExposure, minExposure, maxExposure);
            telemetry.addData("Gain","%d (%d - %d)", myGain, minGain, maxGain);
            telemetry.update();

            // check to see if we need to change exposure or gain.
            thisExpUp = gamepad1.left_bumper;
            thisExpDn = gamepad1.left_trigger > 0.25;
            thisGainUp = gamepad1.right_bumper;
            thisGainDn = gamepad1.right_trigger > 0.25;

            // look for clicks to change exposure
            if (thisExpUp && !lastExpUp) {
                myExposure = Range.clip(myExposure + 1, minExposure, maxExposure);
                setManualExposure(myExposure, myGain);
            } else if (thisExpDn && !lastExpDn) {
                myExposure = Range.clip(myExposure - 1, minExposure, maxExposure);
                setManualExposure(myExposure, myGain);
            }

            // look for clicks to change the gain
            if (thisGainUp && !lastGainUp) {
                myGain = Range.clip(myGain + 1, minGain, maxGain );
                setManualExposure(myExposure, myGain);
            } else if (thisGainDn && !lastGainDn) {
                myGain = Range.clip(myGain - 1, minGain, maxGain );
                setManualExposure(myExposure, myGain);
            }

            // Remember button states for edge detection next loop.
            lastExpUp = thisExpUp;
            lastExpDn = thisExpDn;
            lastGainUp = thisGainUp;
            lastGainDn = thisGainDn;

            sleep(20);
        }
    }

    /**
     * Initialize the AprilTag processor.
     * Builds the processor with SDK defaults and a webcam-backed vision portal.
     */
    private void initAprilTag() {
        // Create the AprilTag processor by using a builder.
        aprilTag = new AprilTagProcessor.Builder().build();

        // Create the WEBCAM vision portal by using a builder.
        visionPortal = new VisionPortal.Builder()
            .setCamera(hardwareMap.get(WebcamName.class, "Webcam 1"))
            .addProcessor(aprilTag)
            .build();
    }

    /*
     Manually set the camera gain and exposure.
     Blocks until the camera is STREAMING (or stop is requested) before applying.
     Can only be called AFTER calling initAprilTag();
     Returns true if controls are set.
    */
    private boolean setManualExposure(int exposureMS, int gain) {
        // Ensure Vision Portal has been setup.
        if (visionPortal == null) {
            return false;
        }

        // Wait for the camera to be open
        if (visionPortal.getCameraState() != VisionPortal.CameraState.STREAMING) {
            telemetry.addData("Camera", "Waiting");
            telemetry.update();
            while (!isStopRequested() && (visionPortal.getCameraState() != VisionPortal.CameraState.STREAMING)) {
                sleep(20);
            }
            telemetry.addData("Camera", "Ready");
            telemetry.update();
        }

        // Set camera controls unless we are stopping.
        if (!isStopRequested())
        {
            // Set exposure.  Make sure we are in Manual Mode for these values to take effect.
            ExposureControl exposureControl = visionPortal.getCameraControl(ExposureControl.class);
            if (exposureControl.getMode() != ExposureControl.Mode.Manual) {
                exposureControl.setMode(ExposureControl.Mode.Manual);
                sleep(50);   // give the camera time to switch modes before setting a value
            }
            exposureControl.setExposure((long)exposureMS, TimeUnit.MILLISECONDS);
            sleep(20);

            // Set Gain.
            GainControl gainControl = visionPortal.getCameraControl(GainControl.class);
            gainControl.setGain(gain);
            sleep(20);
            return (true);
        } else {
            return (false);
        }
    }

    /*
     Read this camera's minimum and maximum Exposure and Gain settings.
     Populates minExposure/maxExposure (ms) and minGain/maxGain fields.
     Can only be called AFTER calling initAprilTag();
    */
    private void getCameraSetting() {
        // Ensure Vision Portal has been setup.
        if (visionPortal == null) {
            return;
        }

        // Wait for the camera to be open
        if (visionPortal.getCameraState() != VisionPortal.CameraState.STREAMING) {
            telemetry.addData("Camera", "Waiting");
            telemetry.update();
            while (!isStopRequested() && (visionPortal.getCameraState() != VisionPortal.CameraState.STREAMING)) {
                sleep(20);
            }
            telemetry.addData("Camera", "Ready");
            telemetry.update();
        }

        // Get camera control values unless we are stopping.
        if (!isStopRequested()) {
            ExposureControl exposureControl = visionPortal.getCameraControl(ExposureControl.class);
            // +1 so the usable minimum stays strictly above the driver-reported floor.
            minExposure = (int)exposureControl.getMinExposure(TimeUnit.MILLISECONDS) + 1;
            maxExposure = (int)exposureControl.getMaxExposure(TimeUnit.MILLISECONDS);

            GainControl gainControl = visionPortal.getCameraControl(GainControl.class);
            minGain = gainControl.getMinGain();
            maxGain = gainControl.getMaxGain();
        }
    }
}
|
@ -0,0 +1,195 @@
|
||||
/* Copyright (c) 2023 FIRST. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted (subject to the limitations in the disclaimer below) provided that
|
||||
* the following conditions are met:
|
||||
*
|
||||
* Redistributions of source code must retain the above copyright notice, this list
|
||||
* of conditions and the following disclaimer.
|
||||
*
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
* list of conditions and the following disclaimer in the documentation and/or
|
||||
* other materials provided with the distribution.
|
||||
*
|
||||
* Neither the name of FIRST nor the names of its contributors may be used to endorse or
|
||||
* promote products derived from this software without specific prior written permission.
|
||||
*
|
||||
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS
|
||||
* LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package org.firstinspires.ftc.robotcontroller.external.samples;
|
||||
|
||||
import com.qualcomm.robotcore.eventloop.opmode.Disabled;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
|
||||
import java.util.List;
|
||||
import org.firstinspires.ftc.robotcore.external.ClassFactory;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.CameraName;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.WebcamName;
|
||||
import org.firstinspires.ftc.vision.VisionPortal;
|
||||
import org.firstinspires.ftc.vision.VisionPortal.CameraState;
|
||||
import org.firstinspires.ftc.vision.apriltag.AprilTagDetection;
|
||||
import org.firstinspires.ftc.vision.apriltag.AprilTagProcessor;
|
||||
|
||||
/**
 * This 2023-2024 OpMode illustrates the basics of AprilTag recognition and pose estimation, using
 * two webcams.
 *
 * The two webcams are combined into one "switchable camera" name; only one streams at a
 * time, and the active one is toggled with the gamepad bumpers while the OpMode runs.
 *
 * Use Android Studio to Copy this Class, and Paste it into your team's code folder with a new name.
 * Remove or comment out the @Disabled line to add this OpMode to the Driver Station OpMode list.
 */
@TeleOp(name = "Concept: AprilTag Switchable Cameras", group = "Concept")
@Disabled
public class ConceptAprilTagSwitchableCameras extends LinearOpMode {

    /**
     * Variables used for switching cameras.
     * The old*Bumper fields remember the previous gamepad state so a switch happens
     * only on a fresh press (rising edge), not on every loop while the button is held.
     */
    private WebcamName webcam1, webcam2;
    private boolean oldLeftBumper;
    private boolean oldRightBumper;

    /**
     * {@link #aprilTag} is the variable to store our instance of the AprilTag processor.
     */
    private AprilTagProcessor aprilTag;

    /**
     * {@link #visionPortal} is the variable to store our instance of the vision portal.
     */
    private VisionPortal visionPortal;

    @Override
    public void runOpMode() {

        initAprilTag();

        // Wait for the DS start button to be touched.
        telemetry.addData("DS preview on/off", "3 dots, Camera Stream");
        telemetry.addData(">", "Touch Play to start OpMode");
        telemetry.update();
        waitForStart();

        if (opModeIsActive()) {
            while (opModeIsActive()) {

                telemetryCameraSwitching();
                telemetryAprilTag();

                // Push telemetry to the Driver Station.
                telemetry.update();

                // Save CPU resources; can resume streaming when needed.
                if (gamepad1.dpad_down) {
                    visionPortal.stopStreaming();
                } else if (gamepad1.dpad_up) {
                    visionPortal.resumeStreaming();
                }

                doCameraSwitching();

                // Share the CPU.
                sleep(20);
            }
        }

        // Save more CPU resources when camera is no longer needed.
        visionPortal.close();

    } // end runOpMode()

    /**
     * Initialize the AprilTag processor.
     * Builds the processor with default settings, then wraps the two configured webcams
     * in a single switchable CameraName that feeds the vision portal.
     */
    private void initAprilTag() {

        // Create the AprilTag processor by using a builder.
        aprilTag = new AprilTagProcessor.Builder().build();

        webcam1 = hardwareMap.get(WebcamName.class, "Webcam 1");
        webcam2 = hardwareMap.get(WebcamName.class, "Webcam 2");
        CameraName switchableCamera = ClassFactory.getInstance()
            .getCameraManager().nameForSwitchableCamera(webcam1, webcam2);

        // Create the vision portal by using a builder.
        visionPortal = new VisionPortal.Builder()
            .setCamera(switchableCamera)
            .addProcessor(aprilTag)
            .build();

    } // end method initAprilTag()

    /**
     * Function to add telemetry about camera switching.
     * Shows which webcam is currently active and which bumper switches to the other one.
     */
    private void telemetryCameraSwitching() {

        if (visionPortal.getActiveCamera().equals(webcam1)) {
            telemetry.addData("activeCamera", "Webcam 1");
            telemetry.addData("Press RightBumper", "to switch to Webcam 2");
        } else {
            telemetry.addData("activeCamera", "Webcam 2");
            telemetry.addData("Press LeftBumper", "to switch to Webcam 1");
        }

    } // end method telemetryCameraSwitching()

    /**
     * Function to add telemetry about AprilTag detections.
     * Detections with library metadata get full pose data (XYZ / PRY / RBE lines);
     * unknown tags report only their ID and pixel center.
     */
    private void telemetryAprilTag() {

        List<AprilTagDetection> currentDetections = aprilTag.getDetections();
        telemetry.addData("# AprilTags Detected", currentDetections.size());

        // Step through the list of detections and display info for each one.
        for (AprilTagDetection detection : currentDetections) {
            if (detection.metadata != null) {
                telemetry.addLine(String.format("\n==== (ID %d) %s", detection.id, detection.metadata.name));
                telemetry.addLine(String.format("XYZ %6.1f %6.1f %6.1f (inch)", detection.ftcPose.x, detection.ftcPose.y, detection.ftcPose.z));
                telemetry.addLine(String.format("PRY %6.1f %6.1f %6.1f (deg)", detection.ftcPose.pitch, detection.ftcPose.roll, detection.ftcPose.yaw));
                telemetry.addLine(String.format("RBE %6.1f %6.1f %6.1f (inch, deg, deg)", detection.ftcPose.range, detection.ftcPose.bearing, detection.ftcPose.elevation));
            } else {
                telemetry.addLine(String.format("\n==== (ID %d) Unknown", detection.id));
                telemetry.addLine(String.format("Center %6.0f %6.0f (pixels)", detection.center.x, detection.center.y));
            }
        } // end for() loop

        // Add "key" information to telemetry
        telemetry.addLine("\nkey:\nXYZ = X (Right), Y (Forward), Z (Up) dist.");
        telemetry.addLine("PRY = Pitch, Roll & Yaw (XYZ Rotation)");
        telemetry.addLine("RBE = Range, Bearing & Elevation");

    } // end method telemetryAprilTag()

    /**
     * Function to set the active camera according to input from the gamepad.
     * Switching is only attempted while the portal is actually STREAMING, and only on a
     * fresh bumper press (edge detection via the old*Bumper fields).
     */
    private void doCameraSwitching() {
        if (visionPortal.getCameraState() == CameraState.STREAMING) {
            // If the left bumper is pressed, use Webcam 1.
            // If the right bumper is pressed, use Webcam 2.
            boolean newLeftBumper = gamepad1.left_bumper;
            boolean newRightBumper = gamepad1.right_bumper;
            if (newLeftBumper && !oldLeftBumper) {
                visionPortal.setActiveCamera(webcam1);
            } else if (newRightBumper && !oldRightBumper) {
                visionPortal.setActiveCamera(webcam2);
            }
            oldLeftBumper = newLeftBumper;
            oldRightBumper = newRightBumper;
        }

    } // end method doCameraSwitching()

} // end class
|
@ -0,0 +1,201 @@
|
||||
/* Copyright (c) 2023 FIRST. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted (subject to the limitations in the disclaimer below) provided that
|
||||
* the following conditions are met:
|
||||
*
|
||||
* Redistributions of source code must retain the above copyright notice, this list
|
||||
* of conditions and the following disclaimer.
|
||||
*
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
* list of conditions and the following disclaimer in the documentation and/or
|
||||
* other materials provided with the distribution.
|
||||
*
|
||||
* Neither the name of FIRST nor the names of its contributors may be used to endorse or
|
||||
* promote products derived from this software without specific prior written permission.
|
||||
*
|
||||
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS
|
||||
* LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package org.firstinspires.ftc.robotcontroller.external.samples;
|
||||
|
||||
import com.qualcomm.robotcore.eventloop.opmode.Disabled;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
|
||||
import java.util.List;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.BuiltinCameraDirection;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.WebcamName;
|
||||
import org.firstinspires.ftc.robotcore.external.tfod.Recognition;
|
||||
import org.firstinspires.ftc.vision.VisionPortal;
|
||||
import org.firstinspires.ftc.vision.apriltag.AprilTagDetection;
|
||||
import org.firstinspires.ftc.vision.apriltag.AprilTagProcessor;
|
||||
import org.firstinspires.ftc.vision.tfod.TfodProcessor;
|
||||
|
||||
/**
 * This 2023-2024 OpMode illustrates the basics of using both AprilTag recognition and TensorFlow
 * Object Detection.
 *
 * Both processors share one camera through a single VisionPortal; each processor can be
 * enabled or disabled independently at runtime with the gamepad dpad.
 *
 * Use Android Studio to Copy this Class, and Paste it into your team's code folder with a new name.
 * Remove or comment out the @Disabled line to add this OpMode to the Driver Station OpMode list.
 */
@TeleOp(name = "Concept: Double Vision", group = "Concept")
@Disabled
public class ConceptDoubleVision extends LinearOpMode {
    private static final boolean USE_WEBCAM = true;  // true for webcam, false for phone camera

    /**
     * {@link #aprilTag} is the variable to store our instance of the AprilTag processor.
     */
    private AprilTagProcessor aprilTag;

    /**
     * {@link #tfod} is the variable to store our instance of the TensorFlow Object Detection processor.
     */
    private TfodProcessor tfod;

    /**
     * {@link #myVisionPortal} is the variable to store our instance of the vision portal.
     */
    private VisionPortal myVisionPortal;

    @Override
    public void runOpMode() {
        initDoubleVision();

        // This OpMode loops continuously, allowing the user to switch between
        // AprilTag and TensorFlow Object Detection (TFOD) image processors.
        // Note: the loop runs during INIT as well as PLAY (isStopRequested, not opModeIsActive).
        while (!isStopRequested()) {

            if (opModeInInit()) {
                telemetry.addData("DS preview on/off","3 dots, Camera Stream");
                telemetry.addLine();
                telemetry.addLine("----------------------------------------");
            }

            // AprilTag section: show detections when enabled, or how to re-enable it.
            if (myVisionPortal.getProcessorEnabled(aprilTag)) {
                // User instructions: Dpad left or Dpad right.
                telemetry.addLine("Dpad Left to disable AprilTag");
                telemetry.addLine();
                telemetryAprilTag();
            } else {
                telemetry.addLine("Dpad Right to enable AprilTag");
            }
            telemetry.addLine();
            telemetry.addLine("----------------------------------------");
            // TFOD section: show recognitions when enabled, or how to re-enable it.
            if (myVisionPortal.getProcessorEnabled(tfod)) {
                telemetry.addLine("Dpad Down to disable TFOD");
                telemetry.addLine();
                telemetryTfod();
            } else {
                telemetry.addLine("Dpad Up to enable TFOD");
            }

            // Push telemetry to the Driver Station.
            telemetry.update();

            // Toggle each processor from the dpad: left/right control AprilTag,
            // down/up control TFOD.
            if (gamepad1.dpad_left) {
                myVisionPortal.setProcessorEnabled(aprilTag, false);
            } else if (gamepad1.dpad_right) {
                myVisionPortal.setProcessorEnabled(aprilTag, true);
            }
            if (gamepad1.dpad_down) {
                myVisionPortal.setProcessorEnabled(tfod, false);
            } else if (gamepad1.dpad_up) {
                myVisionPortal.setProcessorEnabled(tfod, true);
            }

            // Share the CPU.
            sleep(20);

        } // end while loop

    } // end method runOpMode()


    /**
     * Function to initialize AprilTag and TFOD.
     * Both processors are built with default settings and attached to one shared
     * camera (webcam or built-in phone camera, selected by USE_WEBCAM).
     */
    private void initDoubleVision() {
        // -----------------------------------------------------------------------------------------
        // AprilTag Configuration
        // -----------------------------------------------------------------------------------------

        aprilTag = new AprilTagProcessor.Builder()
            .build();

        // -----------------------------------------------------------------------------------------
        // TFOD Configuration
        // -----------------------------------------------------------------------------------------

        tfod = new TfodProcessor.Builder()
            .build();

        // -----------------------------------------------------------------------------------------
        // Camera Configuration
        // -----------------------------------------------------------------------------------------

        if (USE_WEBCAM) {
            myVisionPortal = new VisionPortal.Builder()
                .setCamera(hardwareMap.get(WebcamName.class, "Webcam 1"))
                .addProcessors(tfod, aprilTag)
                .build();
        } else {
            myVisionPortal = new VisionPortal.Builder()
                .setCamera(BuiltinCameraDirection.BACK)
                .addProcessors(tfod, aprilTag)
                .build();
        }
    } // end initDoubleVision()

    /**
     * Function to add telemetry about AprilTag detections.
     * Detections with library metadata get full pose data (XYZ / PRY / RBE lines);
     * unknown tags report only their ID and pixel center.
     */
    private void telemetryAprilTag() {
        List<AprilTagDetection> currentDetections = aprilTag.getDetections();
        telemetry.addData("# AprilTags Detected", currentDetections.size());

        // Step through the list of detections and display info for each one.
        for (AprilTagDetection detection : currentDetections) {
            if (detection.metadata != null) {
                telemetry.addLine(String.format("\n==== (ID %d) %s", detection.id, detection.metadata.name));
                telemetry.addLine(String.format("XYZ %6.1f %6.1f %6.1f (inch)", detection.ftcPose.x, detection.ftcPose.y, detection.ftcPose.z));
                telemetry.addLine(String.format("PRY %6.1f %6.1f %6.1f (deg)", detection.ftcPose.pitch, detection.ftcPose.roll, detection.ftcPose.yaw));
                telemetry.addLine(String.format("RBE %6.1f %6.1f %6.1f (inch, deg, deg)", detection.ftcPose.range, detection.ftcPose.bearing, detection.ftcPose.elevation));
            } else {
                telemetry.addLine(String.format("\n==== (ID %d) Unknown", detection.id));
                telemetry.addLine(String.format("Center %6.0f %6.0f (pixels)", detection.center.x, detection.center.y));
            }
        } // end for() loop

    } // end method telemetryAprilTag()

    /**
     * Function to add telemetry about TensorFlow Object Detection (TFOD) recognitions.
     * For each recognition: label with confidence, bounding-box center, and size in pixels.
     */
    private void telemetryTfod() {
        List<Recognition> currentRecognitions = tfod.getRecognitions();
        telemetry.addData("# Objects Detected", currentRecognitions.size());

        // Step through the list of recognitions and display info for each one.
        for (Recognition recognition : currentRecognitions) {
            // Bounding-box center, in pixels.
            double x = (recognition.getLeft() + recognition.getRight()) / 2 ;
            double y = (recognition.getTop() + recognition.getBottom()) / 2 ;

            telemetry.addData(""," ");
            telemetry.addData("Image", "%s (%.0f %% Conf.)", recognition.getLabel(), recognition.getConfidence() * 100);
            telemetry.addData("- Position", "%.0f / %.0f", x, y);
            telemetry.addData("- Size", "%.0f x %.0f", recognition.getWidth(), recognition.getHeight());
        } // end for() loop

    } // end method telemetryTfod()

} // end class
|
@ -33,157 +33,149 @@ import com.qualcomm.robotcore.eventloop.opmode.Disabled;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
|
||||
import java.util.List;
|
||||
import org.firstinspires.ftc.robotcore.external.ClassFactory;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaLocalizer;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaLocalizer.CameraDirection;
|
||||
import org.firstinspires.ftc.robotcore.external.tfod.TFObjectDetector;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.BuiltinCameraDirection;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.WebcamName;
|
||||
import org.firstinspires.ftc.robotcore.external.tfod.Recognition;
|
||||
import org.firstinspires.ftc.vision.VisionPortal;
|
||||
import org.firstinspires.ftc.vision.tfod.TfodProcessor;
|
||||
|
||||
/**
|
||||
* This 2022-2023 OpMode illustrates the basics of using the TensorFlow Object Detection API to
|
||||
* determine which image is being presented to the robot.
|
||||
* This 2023-2024 OpMode illustrates the basics of TensorFlow Object Detection,
|
||||
* including Java Builder structures for specifying Vision parameters.
|
||||
*
|
||||
* Use Android Studio to Copy this Class, and Paste it into your team's code folder with a new name.
|
||||
* Remove or comment out the @Disabled line to add this OpMode to the Driver Station OpMode list.
|
||||
*
|
||||
* IMPORTANT: In order to use this OpMode, you need to obtain your own Vuforia license key as
|
||||
* is explained below.
|
||||
*/
|
||||
@TeleOp(name = "Concept: TensorFlow Object Detection", group = "Concept")
|
||||
@Disabled
|
||||
public class ConceptTensorFlowObjectDetection extends LinearOpMode {
|
||||
|
||||
/*
|
||||
* Specify the source for the Tensor Flow Model.
|
||||
* If the TensorFlowLite object model is included in the Robot Controller App as an "asset",
|
||||
* the OpMode must to load it using loadModelFromAsset(). However, if a team generated model
|
||||
* has been downloaded to the Robot Controller's SD FLASH memory, it must to be loaded using loadModelFromFile()
|
||||
* Here we assume it's an Asset. Also see method initTfod() below .
|
||||
*/
|
||||
private static final String TFOD_MODEL_ASSET = "PowerPlay.tflite";
|
||||
// private static final String TFOD_MODEL_FILE = "/sdcard/FIRST/tflitemodels/CustomTeamModel.tflite";
|
||||
|
||||
private static final String[] LABELS = {
|
||||
"1 Bolt",
|
||||
"2 Bulb",
|
||||
"3 Panel"
|
||||
};
|
||||
|
||||
/*
|
||||
* IMPORTANT: You need to obtain your own license key to use Vuforia. The string below with which
|
||||
* 'parameters.vuforiaLicenseKey' is initialized is for illustration only, and will not function.
|
||||
* A Vuforia 'Development' license key, can be obtained free of charge from the Vuforia developer
|
||||
* web site at https://developer.vuforia.com/license-manager.
|
||||
*
|
||||
* Vuforia license keys are always 380 characters long, and look as if they contain mostly
|
||||
* random data. As an example, here is a example of a fragment of a valid key:
|
||||
* ... yIgIzTqZ4mWjk9wd3cZO9T1axEqzuhxoGlfOOI2dRzKS4T0hQ8kT ...
|
||||
* Once you've obtained a license key, copy the string from the Vuforia web site
|
||||
* and paste it in to your code on the next line, between the double quotes.
|
||||
*/
|
||||
private static final String VUFORIA_KEY =
|
||||
" -- YOUR NEW VUFORIA KEY GOES HERE --- ";
|
||||
private static final boolean USE_WEBCAM = true; // true for webcam, false for phone camera
|
||||
|
||||
/**
|
||||
* {@link #vuforia} is the variable we will use to store our instance of the Vuforia
|
||||
* localization engine.
|
||||
* {@link #tfod} is the variable to store our instance of the TensorFlow Object Detection processor.
|
||||
*/
|
||||
private VuforiaLocalizer vuforia;
|
||||
private TfodProcessor tfod;
|
||||
|
||||
/**
|
||||
* {@link #tfod} is the variable we will use to store our instance of the TensorFlow Object
|
||||
* Detection engine.
|
||||
* {@link #visionPortal} is the variable to store our instance of the vision portal.
|
||||
*/
|
||||
private TFObjectDetector tfod;
|
||||
private VisionPortal visionPortal;
|
||||
|
||||
@Override
|
||||
public void runOpMode() {
|
||||
// The TFObjectDetector uses the camera frames from the VuforiaLocalizer, so we create that
|
||||
// first.
|
||||
initVuforia();
|
||||
|
||||
initTfod();
|
||||
|
||||
/**
|
||||
* Activate TensorFlow Object Detection before we wait for the start command.
|
||||
* Do it here so that the Camera Stream window will have the TensorFlow annotations visible.
|
||||
**/
|
||||
if (tfod != null) {
|
||||
tfod.activate();
|
||||
|
||||
// The TensorFlow software will scale the input images from the camera to a lower resolution.
|
||||
// This can result in lower detection accuracy at longer distances (> 55cm or 22").
|
||||
// If your target is at distance greater than 50 cm (20") you can increase the magnification value
|
||||
// to artificially zoom in to the center of image. For best results, the "aspectRatio" argument
|
||||
// should be set to the value of the images used to create the TensorFlow Object Detection model
|
||||
// (typically 16/9).
|
||||
tfod.setZoom(1.0, 16.0/9.0);
|
||||
}
|
||||
|
||||
/** Wait for the game to begin */
|
||||
telemetry.addData(">", "Press Play to start op mode");
|
||||
// Wait for the DS start button to be touched.
|
||||
telemetry.addData("DS preview on/off", "3 dots, Camera Stream");
|
||||
telemetry.addData(">", "Touch Play to start OpMode");
|
||||
telemetry.update();
|
||||
waitForStart();
|
||||
|
||||
if (opModeIsActive()) {
|
||||
while (opModeIsActive()) {
|
||||
if (tfod != null) {
|
||||
// getUpdatedRecognitions() will return null if no new information is available since
|
||||
// the last time that call was made.
|
||||
List<Recognition> updatedRecognitions = tfod.getUpdatedRecognitions();
|
||||
if (updatedRecognitions != null) {
|
||||
telemetry.addData("# Objects Detected", updatedRecognitions.size());
|
||||
|
||||
// step through the list of recognitions and display image position/size information for each one
|
||||
// Note: "Image number" refers to the randomized image orientation/number
|
||||
for (Recognition recognition : updatedRecognitions) {
|
||||
double col = (recognition.getLeft() + recognition.getRight()) / 2 ;
|
||||
double row = (recognition.getTop() + recognition.getBottom()) / 2 ;
|
||||
double width = Math.abs(recognition.getRight() - recognition.getLeft()) ;
|
||||
double height = Math.abs(recognition.getTop() - recognition.getBottom()) ;
|
||||
telemetryTfod();
|
||||
|
||||
telemetry.addData(""," ");
|
||||
telemetry.addData("Image", "%s (%.0f %% Conf.)", recognition.getLabel(), recognition.getConfidence() * 100 );
|
||||
telemetry.addData("- Position (Row/Col)","%.0f / %.0f", row, col);
|
||||
telemetry.addData("- Size (Width/Height)","%.0f / %.0f", width, height);
|
||||
}
|
||||
telemetry.update();
|
||||
}
|
||||
// Push telemetry to the Driver Station.
|
||||
telemetry.update();
|
||||
|
||||
// Save CPU resources; can resume streaming when needed.
|
||||
if (gamepad1.dpad_down) {
|
||||
visionPortal.stopStreaming();
|
||||
} else if (gamepad1.dpad_up) {
|
||||
visionPortal.resumeStreaming();
|
||||
}
|
||||
|
||||
// Share the CPU.
|
||||
sleep(20);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Save more CPU resources when camera is no longer needed.
|
||||
visionPortal.close();
|
||||
|
||||
} // end runOpMode()
|
||||
|
||||
/**
|
||||
* Initialize the Vuforia localization engine.
|
||||
*/
|
||||
private void initVuforia() {
|
||||
/*
|
||||
* Configure Vuforia by creating a Parameter object, and passing it to the Vuforia engine.
|
||||
*/
|
||||
VuforiaLocalizer.Parameters parameters = new VuforiaLocalizer.Parameters();
|
||||
|
||||
parameters.vuforiaLicenseKey = VUFORIA_KEY;
|
||||
parameters.cameraDirection = CameraDirection.BACK;
|
||||
|
||||
// Instantiate the Vuforia engine
|
||||
vuforia = ClassFactory.getInstance().createVuforia(parameters);
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the TensorFlow Object Detection engine.
|
||||
* Initialize the TensorFlow Object Detection processor.
|
||||
*/
|
||||
private void initTfod() {
|
||||
int tfodMonitorViewId = hardwareMap.appContext.getResources().getIdentifier(
|
||||
"tfodMonitorViewId", "id", hardwareMap.appContext.getPackageName());
|
||||
TFObjectDetector.Parameters tfodParameters = new TFObjectDetector.Parameters(tfodMonitorViewId);
|
||||
tfodParameters.minResultConfidence = 0.75f;
|
||||
tfodParameters.isModelTensorFlow2 = true;
|
||||
tfodParameters.inputSize = 300;
|
||||
tfod = ClassFactory.getInstance().createTFObjectDetector(tfodParameters, vuforia);
|
||||
|
||||
// Use loadModelFromAsset() if the TF Model is built in as an asset by Android Studio
|
||||
// Use loadModelFromFile() if you have downloaded a custom team model to the Robot Controller's FLASH.
|
||||
tfod.loadModelFromAsset(TFOD_MODEL_ASSET, LABELS);
|
||||
// tfod.loadModelFromFile(TFOD_MODEL_FILE, LABELS);
|
||||
}
|
||||
}
|
||||
// Create the TensorFlow processor by using a builder.
|
||||
tfod = new TfodProcessor.Builder()
|
||||
|
||||
// Use setModelAssetName() if the TF Model is built in as an asset.
|
||||
// Use setModelFileName() if you have downloaded a custom team model to the Robot Controller.
|
||||
//.setModelAssetName(TFOD_MODEL_ASSET)
|
||||
//.setModelFileName(TFOD_MODEL_FILE)
|
||||
|
||||
//.setModelLabels(LABELS)
|
||||
//.setIsModelTensorFlow2(true)
|
||||
//.setIsModelQuantized(true)
|
||||
//.setModelInputSize(300)
|
||||
//.setModelAspectRatio(16.0 / 9.0)
|
||||
|
||||
.build();
|
||||
|
||||
// Create the vision portal by using a builder.
|
||||
VisionPortal.Builder builder = new VisionPortal.Builder();
|
||||
|
||||
// Set the camera (webcam vs. built-in RC phone camera).
|
||||
if (USE_WEBCAM) {
|
||||
builder.setCamera(hardwareMap.get(WebcamName.class, "Webcam 1"));
|
||||
} else {
|
||||
builder.setCamera(BuiltinCameraDirection.BACK);
|
||||
}
|
||||
|
||||
// Choose a camera resolution. Not all cameras support all resolutions.
|
||||
//builder.setCameraResolution(new Size(640, 480));
|
||||
|
||||
// Enable the RC preview (LiveView). Set "false" to omit camera monitoring.
|
||||
//builder.enableCameraMonitoring(true);
|
||||
|
||||
// Set the stream format; MJPEG uses less bandwidth than default YUY2.
|
||||
//builder.setStreamFormat(VisionPortal.StreamFormat.YUY2);
|
||||
|
||||
// Choose whether or not LiveView stops if no processors are enabled.
|
||||
// If set "true", monitor shows solid orange screen if no processors enabled.
|
||||
// If set "false", monitor shows camera view without annotations.
|
||||
//builder.setAutoStopLiveView(false);
|
||||
|
||||
// Set and enable the processor.
|
||||
builder.addProcessor(tfod);
|
||||
|
||||
// Build the Vision Portal, using the above settings.
|
||||
visionPortal = builder.build();
|
||||
|
||||
// Set confidence threshold for TFOD recognitions, at any time.
|
||||
//tfod.setMinResultConfidence(0.75f);
|
||||
|
||||
// Disable or re-enable the TFOD processor at any time.
|
||||
//visionPortal.setProcessorEnabled(tfod, true);
|
||||
|
||||
} // end method initTfod()
|
||||
|
||||
/**
|
||||
* Function to add telemetry about TensorFlow Object Detection (TFOD) recognitions.
|
||||
*/
|
||||
private void telemetryTfod() {
|
||||
|
||||
List<Recognition> currentRecognitions = tfod.getRecognitions();
|
||||
telemetry.addData("# Objects Detected", currentRecognitions.size());
|
||||
|
||||
// Step through the list of recognitions and display info for each one.
|
||||
for (Recognition recognition : currentRecognitions) {
|
||||
double x = (recognition.getLeft() + recognition.getRight()) / 2 ;
|
||||
double y = (recognition.getTop() + recognition.getBottom()) / 2 ;
|
||||
|
||||
telemetry.addData(""," ");
|
||||
telemetry.addData("Image", "%s (%.0f %% Conf.)", recognition.getLabel(), recognition.getConfidence() * 100);
|
||||
telemetry.addData("- Position", "%.0f / %.0f", x, y);
|
||||
telemetry.addData("- Size", "%.0f x %.0f", recognition.getWidth(), recognition.getHeight());
|
||||
} // end for() loop
|
||||
|
||||
} // end method telemetryTfod()
|
||||
|
||||
} // end class
|
||||
|
@ -0,0 +1,141 @@
|
||||
/* Copyright (c) 2019 FIRST. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted (subject to the limitations in the disclaimer below) provided that
|
||||
* the following conditions are met:
|
||||
*
|
||||
* Redistributions of source code must retain the above copyright notice, this list
|
||||
* of conditions and the following disclaimer.
|
||||
*
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
* list of conditions and the following disclaimer in the documentation and/or
|
||||
* other materials provided with the distribution.
|
||||
*
|
||||
* Neither the name of FIRST nor the names of its contributors may be used to endorse or
|
||||
* promote products derived from this software without specific prior written permission.
|
||||
*
|
||||
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS
|
||||
* LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package org.firstinspires.ftc.robotcontroller.external.samples;
|
||||
|
||||
import com.qualcomm.robotcore.eventloop.opmode.Disabled;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
|
||||
import java.util.List;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.BuiltinCameraDirection;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.WebcamName;
|
||||
import org.firstinspires.ftc.robotcore.external.tfod.Recognition;
|
||||
import org.firstinspires.ftc.vision.VisionPortal;
|
||||
import org.firstinspires.ftc.vision.tfod.TfodProcessor;
|
||||
|
||||
/**
|
||||
* This 2023-2024 OpMode illustrates the basics of TensorFlow Object Detection, using
|
||||
* the easiest way.
|
||||
*
|
||||
* Use Android Studio to Copy this Class, and Paste it into your team's code folder with a new name.
|
||||
* Remove or comment out the @Disabled line to add this OpMode to the Driver Station OpMode list.
|
||||
*/
|
||||
@TeleOp(name = "Concept: TensorFlow Object Detection Easy", group = "Concept")
|
||||
@Disabled
|
||||
public class ConceptTensorFlowObjectDetectionEasy extends LinearOpMode {
|
||||
|
||||
private static final boolean USE_WEBCAM = true; // true for webcam, false for phone camera
|
||||
|
||||
/**
|
||||
* {@link #tfod} is the variable to store our instance of the TensorFlow Object Detection processor.
|
||||
*/
|
||||
private TfodProcessor tfod;
|
||||
|
||||
/**
|
||||
* {@link #visionPortal} is the variable to store our instance of the vision portal.
|
||||
*/
|
||||
private VisionPortal visionPortal;
|
||||
|
||||
@Override
|
||||
public void runOpMode() {
|
||||
|
||||
initTfod();
|
||||
|
||||
// Wait for the DS start button to be touched.
|
||||
telemetry.addData("DS preview on/off", "3 dots, Camera Stream");
|
||||
telemetry.addData(">", "Touch Play to start OpMode");
|
||||
telemetry.update();
|
||||
waitForStart();
|
||||
|
||||
if (opModeIsActive()) {
|
||||
while (opModeIsActive()) {
|
||||
|
||||
telemetryTfod();
|
||||
|
||||
// Push telemetry to the Driver Station.
|
||||
telemetry.update();
|
||||
|
||||
// Save CPU resources; can resume streaming when needed.
|
||||
if (gamepad1.dpad_down) {
|
||||
visionPortal.stopStreaming();
|
||||
} else if (gamepad1.dpad_up) {
|
||||
visionPortal.resumeStreaming();
|
||||
}
|
||||
|
||||
// Share the CPU.
|
||||
sleep(20);
|
||||
}
|
||||
}
|
||||
|
||||
// Save more CPU resources when camera is no longer needed.
|
||||
visionPortal.close();
|
||||
|
||||
} // end runOpMode()
|
||||
|
||||
/**
|
||||
* Initialize the TensorFlow Object Detection processor.
|
||||
*/
|
||||
private void initTfod() {
|
||||
|
||||
// Create the TensorFlow processor the easy way.
|
||||
tfod = TfodProcessor.easyCreateWithDefaults();
|
||||
|
||||
// Create the vision portal the easy way.
|
||||
if (USE_WEBCAM) {
|
||||
visionPortal = VisionPortal.easyCreateWithDefaults(
|
||||
hardwareMap.get(WebcamName.class, "Webcam 1"), tfod);
|
||||
} else {
|
||||
visionPortal = VisionPortal.easyCreateWithDefaults(
|
||||
BuiltinCameraDirection.BACK, tfod);
|
||||
}
|
||||
|
||||
} // end method initTfod()
|
||||
|
||||
/**
|
||||
* Function to add telemetry about TensorFlow Object Detection (TFOD) recognitions.
|
||||
*/
|
||||
private void telemetryTfod() {
|
||||
|
||||
List<Recognition> currentRecognitions = tfod.getRecognitions();
|
||||
telemetry.addData("# Objects Detected", currentRecognitions.size());
|
||||
|
||||
// Step through the list of recognitions and display info for each one.
|
||||
for (Recognition recognition : currentRecognitions) {
|
||||
double x = (recognition.getLeft() + recognition.getRight()) / 2 ;
|
||||
double y = (recognition.getTop() + recognition.getBottom()) / 2 ;
|
||||
|
||||
telemetry.addData(""," ");
|
||||
telemetry.addData("Image", "%s (%.0f %% Conf.)", recognition.getLabel(), recognition.getConfidence() * 100);
|
||||
telemetry.addData("- Position", "%.0f / %.0f", x, y);
|
||||
telemetry.addData("- Size", "%.0f x %.0f", recognition.getWidth(), recognition.getHeight());
|
||||
} // end for() loop
|
||||
|
||||
} // end method telemetryTfod()
|
||||
|
||||
} // end class
|
@ -34,191 +34,152 @@ import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
|
||||
import java.util.List;
|
||||
import org.firstinspires.ftc.robotcore.external.ClassFactory;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.SwitchableCamera;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.CameraName;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.WebcamName;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaLocalizer;
|
||||
import org.firstinspires.ftc.robotcore.external.tfod.TFObjectDetector;
|
||||
import org.firstinspires.ftc.robotcore.external.tfod.Recognition;
|
||||
import org.firstinspires.ftc.vision.VisionPortal;
|
||||
import org.firstinspires.ftc.vision.VisionPortal.CameraState;
|
||||
import org.firstinspires.ftc.vision.tfod.TfodProcessor;
|
||||
|
||||
/**
|
||||
* This 2022-2023 OpMode illustrates the basics of using the TensorFlow Object Detection API to
|
||||
* determine which image is being presented to the robot.
|
||||
* This 2023-2024 OpMode illustrates the basics of TensorFlow Object Detection, using
|
||||
* two webcams.
|
||||
*
|
||||
* Use Android Studio to Copy this Class, and Paste it into your team's code folder with a new name.
|
||||
* Remove or comment out the @Disabled line to add this opmode to the Driver Station OpMode list.
|
||||
*
|
||||
* IMPORTANT: In order to use this OpMode, you need to obtain your own Vuforia license key as
|
||||
* is explained below.
|
||||
* Remove or comment out the @Disabled line to add this OpMode to the Driver Station OpMode list.
|
||||
*/
|
||||
@TeleOp(name = "Concept: TensorFlow Object Detection Switchable Cameras", group = "Concept")
|
||||
@Disabled
|
||||
public class ConceptTensorFlowObjectDetectionSwitchableCameras extends LinearOpMode {
|
||||
|
||||
/*
|
||||
* Specify the source for the Tensor Flow Model.
|
||||
* If the TensorFlowLite object model is included in the Robot Controller App as an "asset",
|
||||
* the OpMode must to load it using loadModelFromAsset(). However, if a team generated model
|
||||
* has been downloaded to the Robot Controller's SD FLASH memory, it must to be loaded using loadModelFromFile()
|
||||
* Here we assume it's an Asset. Also see method initTfod() below .
|
||||
*/
|
||||
private static final String TFOD_MODEL_ASSET = "PowerPlay.tflite";
|
||||
// private static final String TFOD_MODEL_FILE = "/sdcard/FIRST/tflitemodels/CustomTeamModel.tflite";
|
||||
|
||||
private static final String[] LABELS = {
|
||||
"1 Bolt",
|
||||
"2 Bulb",
|
||||
"3 Panel"
|
||||
};
|
||||
|
||||
/*
|
||||
* IMPORTANT: You need to obtain your own license key to use Vuforia. The string below with which
|
||||
* 'parameters.vuforiaLicenseKey' is initialized is for illustration only, and will not function.
|
||||
* A Vuforia 'Development' license key, can be obtained free of charge from the Vuforia developer
|
||||
* web site at https://developer.vuforia.com/license-manager.
|
||||
*
|
||||
* Vuforia license keys are always 380 characters long, and look as if they contain mostly
|
||||
* random data. As an example, here is a example of a fragment of a valid key:
|
||||
* ... yIgIzTqZ4mWjk9wd3cZO9T1axEqzuhxoGlfOOI2dRzKS4T0hQ8kT ...
|
||||
* Once you've obtained a license key, copy the string from the Vuforia web site
|
||||
* and paste it in to your code on the next line, between the double quotes.
|
||||
*/
|
||||
private static final String VUFORIA_KEY =
|
||||
" -- YOUR NEW VUFORIA KEY GOES HERE --- ";
|
||||
|
||||
/**
|
||||
* {@link #vuforia} is the variable we will use to store our instance of the Vuforia
|
||||
* localization engine.
|
||||
*/
|
||||
private VuforiaLocalizer vuforia;
|
||||
|
||||
/**
|
||||
* Variables used for switching cameras.
|
||||
*/
|
||||
private WebcamName webcam1, webcam2;
|
||||
private SwitchableCamera switchableCamera;
|
||||
private boolean oldLeftBumper;
|
||||
private boolean oldRightBumper;
|
||||
|
||||
/**
|
||||
* {@link #tfod} is the variable we will use to store our instance of the TensorFlow Object
|
||||
* Detection engine.
|
||||
* {@link #tfod} is the variable to store our instance of the TensorFlow Object Detection processor.
|
||||
*/
|
||||
private TFObjectDetector tfod;
|
||||
private TfodProcessor tfod;
|
||||
|
||||
/**
|
||||
* {@link #visionPortal} is the variable to store our instance of the vision portal.
|
||||
*/
|
||||
private VisionPortal visionPortal;
|
||||
|
||||
@Override
|
||||
public void runOpMode() {
|
||||
// The TFObjectDetector uses the camera frames from the VuforiaLocalizer, so we create that
|
||||
// first.
|
||||
initVuforia();
|
||||
|
||||
initTfod();
|
||||
|
||||
/**
|
||||
* Activate TensorFlow Object Detection before we wait for the start command.
|
||||
* Do it here so that the Camera Stream window will have the TensorFlow annotations visible.
|
||||
**/
|
||||
if (tfod != null) {
|
||||
tfod.activate();
|
||||
|
||||
// The TensorFlow software will scale the input images from the camera to a lower resolution.
|
||||
// This can result in lower detection accuracy at longer distances (> 55cm or 22").
|
||||
// If your target is at distance greater than 50 cm (20") you can increase the magnification value
|
||||
// to artificially zoom in to the center of image. For best results, the "aspectRatio" argument
|
||||
// should be set to the value of the images used to create the TensorFlow Object Detection model
|
||||
// (typically 16/9).
|
||||
tfod.setZoom(1.0, 16.0/9.0);
|
||||
}
|
||||
|
||||
/** Wait for the game to begin */
|
||||
telemetry.addData(">", "Press Play to start op mode");
|
||||
// Wait for the DS start button to be touched.
|
||||
telemetry.addData("DS preview on/off", "3 dots, Camera Stream");
|
||||
telemetry.addData(">", "Touch Play to start OpMode");
|
||||
telemetry.update();
|
||||
waitForStart();
|
||||
|
||||
if (opModeIsActive()) {
|
||||
while (opModeIsActive()) {
|
||||
if (tfod != null) {
|
||||
doCameraSwitching();
|
||||
List<Recognition> recognitions = tfod.getRecognitions();
|
||||
telemetry.addData("# Objects Detected", recognitions.size());
|
||||
// step through the list of recognitions and display image size and position
|
||||
// Note: "Image number" refers to the randomized image orientation/number
|
||||
for (Recognition recognition : recognitions) {
|
||||
double col = (recognition.getLeft() + recognition.getRight()) / 2 ;
|
||||
double row = (recognition.getTop() + recognition.getBottom()) / 2 ;
|
||||
double width = Math.abs(recognition.getRight() - recognition.getLeft()) ;
|
||||
double height = Math.abs(recognition.getTop() - recognition.getBottom()) ;
|
||||
|
||||
telemetry.addData(""," ");
|
||||
telemetry.addData("Image", "%s (%.0f %% Conf.)", recognition.getLabel(), recognition.getConfidence() * 100 );
|
||||
telemetry.addData("- Position (Row/Col)","%.0f / %.0f", row, col);
|
||||
telemetry.addData("- Size (Width/Height)","%.0f / %.0f", width, height);
|
||||
}
|
||||
telemetry.update();
|
||||
telemetryCameraSwitching();
|
||||
telemetryTfod();
|
||||
|
||||
// Push telemetry to the Driver Station.
|
||||
telemetry.update();
|
||||
|
||||
// Save CPU resources; can resume streaming when needed.
|
||||
if (gamepad1.dpad_down) {
|
||||
visionPortal.stopStreaming();
|
||||
} else if (gamepad1.dpad_up) {
|
||||
visionPortal.resumeStreaming();
|
||||
}
|
||||
|
||||
doCameraSwitching();
|
||||
|
||||
// Share the CPU.
|
||||
sleep(20);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Save more CPU resources when camera is no longer needed.
|
||||
visionPortal.close();
|
||||
|
||||
} // end runOpMode()
|
||||
|
||||
/**
|
||||
* Initialize the Vuforia localization engine.
|
||||
*/
|
||||
private void initVuforia() {
|
||||
/*
|
||||
* Configure Vuforia by creating a Parameter object, and passing it to the Vuforia engine.
|
||||
*/
|
||||
VuforiaLocalizer.Parameters parameters = new VuforiaLocalizer.Parameters();
|
||||
|
||||
parameters.vuforiaLicenseKey = VUFORIA_KEY;
|
||||
|
||||
// Indicate that we wish to be able to switch cameras.
|
||||
webcam1 = hardwareMap.get(WebcamName.class, "Webcam 1");
|
||||
webcam2 = hardwareMap.get(WebcamName.class, "Webcam 2");
|
||||
parameters.cameraName = ClassFactory.getInstance().getCameraManager().nameForSwitchableCamera(webcam1, webcam2);
|
||||
|
||||
// Instantiate the Vuforia engine
|
||||
vuforia = ClassFactory.getInstance().createVuforia(parameters);
|
||||
|
||||
// Set the active camera to Webcam 1.
|
||||
switchableCamera = (SwitchableCamera) vuforia.getCamera();
|
||||
switchableCamera.setActiveCamera(webcam1);
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the TensorFlow Object Detection engine.
|
||||
* Initialize the TensorFlow Object Detection processor.
|
||||
*/
|
||||
private void initTfod() {
|
||||
int tfodMonitorViewId = hardwareMap.appContext.getResources().getIdentifier(
|
||||
"tfodMonitorViewId", "id", hardwareMap.appContext.getPackageName());
|
||||
TFObjectDetector.Parameters tfodParameters = new TFObjectDetector.Parameters(tfodMonitorViewId);
|
||||
tfodParameters.minResultConfidence = 0.75f;
|
||||
tfodParameters.isModelTensorFlow2 = true;
|
||||
tfodParameters.inputSize = 300;
|
||||
tfod = ClassFactory.getInstance().createTFObjectDetector(tfodParameters, vuforia);
|
||||
|
||||
// Use loadModelFromAsset() if the TF Model is built in as an asset by Android Studio
|
||||
// Use loadModelFromFile() if you have downloaded a custom team model to the Robot Controller's FLASH.
|
||||
tfod.loadModelFromAsset(TFOD_MODEL_ASSET, LABELS);
|
||||
// tfod.loadModelFromFile(TFOD_MODEL_FILE, LABELS);
|
||||
}
|
||||
// Create the TensorFlow processor by using a builder.
|
||||
tfod = new TfodProcessor.Builder().build();
|
||||
|
||||
private void doCameraSwitching() {
|
||||
// If the left bumper is pressed, use Webcam 1.
|
||||
// If the right bumper is pressed, use Webcam 2.
|
||||
boolean newLeftBumper = gamepad1.left_bumper;
|
||||
boolean newRightBumper = gamepad1.right_bumper;
|
||||
if (newLeftBumper && !oldLeftBumper) {
|
||||
switchableCamera.setActiveCamera(webcam1);
|
||||
} else if (newRightBumper && !oldRightBumper) {
|
||||
switchableCamera.setActiveCamera(webcam2);
|
||||
}
|
||||
oldLeftBumper = newLeftBumper;
|
||||
oldRightBumper = newRightBumper;
|
||||
webcam1 = hardwareMap.get(WebcamName.class, "Webcam 1");
|
||||
webcam2 = hardwareMap.get(WebcamName.class, "Webcam 2");
|
||||
CameraName switchableCamera = ClassFactory.getInstance()
|
||||
.getCameraManager().nameForSwitchableCamera(webcam1, webcam2);
|
||||
|
||||
if (switchableCamera.getActiveCamera().equals(webcam1)) {
|
||||
// Create the vision portal by using a builder.
|
||||
visionPortal = new VisionPortal.Builder()
|
||||
.setCamera(switchableCamera)
|
||||
.addProcessor(tfod)
|
||||
.build();
|
||||
|
||||
} // end method initTfod()
|
||||
|
||||
/**
|
||||
* Function to add telemetry about camera switching.
|
||||
*/
|
||||
private void telemetryCameraSwitching() {
|
||||
if (visionPortal.getActiveCamera().equals(webcam1)) {
|
||||
telemetry.addData("activeCamera", "Webcam 1");
|
||||
telemetry.addData("Press RightBumper", "to switch to Webcam 2");
|
||||
} else {
|
||||
telemetry.addData("activeCamera", "Webcam 2");
|
||||
telemetry.addData("Press LeftBumper", "to switch to Webcam 1");
|
||||
}
|
||||
}
|
||||
}
|
||||
} // end method telemetryCameraSwitching()
|
||||
|
||||
/**
|
||||
* Function to add telemetry about TensorFlow Object Detection (TFOD) recognitions.
|
||||
*/
|
||||
private void telemetryTfod() {
|
||||
|
||||
List<Recognition> currentRecognitions = tfod.getRecognitions();
|
||||
telemetry.addData("# Objects Detected", currentRecognitions.size());
|
||||
|
||||
// Step through the list of recognitions and display info for each one.
|
||||
for (Recognition recognition : currentRecognitions) {
|
||||
double x = (recognition.getLeft() + recognition.getRight()) / 2 ;
|
||||
double y = (recognition.getTop() + recognition.getBottom()) / 2 ;
|
||||
|
||||
telemetry.addData(""," ");
|
||||
telemetry.addData("Image", "%s (%.0f %% Conf.)", recognition.getLabel(), recognition.getConfidence() * 100);
|
||||
telemetry.addData("- Position", "%.0f / %.0f", x, y);
|
||||
telemetry.addData("- Size", "%.0f x %.0f", recognition.getWidth(), recognition.getHeight());
|
||||
} // end for() loop
|
||||
|
||||
} // end method telemetryTfod()
|
||||
|
||||
/**
|
||||
* Function to set the active camera according to input from the gamepad.
|
||||
*/
|
||||
private void doCameraSwitching() {
|
||||
if (visionPortal.getCameraState() == CameraState.STREAMING) {
|
||||
// If the left bumper is pressed, use Webcam 1.
|
||||
// If the right bumper is pressed, use Webcam 2.
|
||||
boolean newLeftBumper = gamepad1.left_bumper;
|
||||
boolean newRightBumper = gamepad1.right_bumper;
|
||||
if (newLeftBumper && !oldLeftBumper) {
|
||||
visionPortal.setActiveCamera(webcam1);
|
||||
} else if (newRightBumper && !oldRightBumper) {
|
||||
visionPortal.setActiveCamera(webcam2);
|
||||
}
|
||||
oldLeftBumper = newLeftBumper;
|
||||
oldRightBumper = newRightBumper;
|
||||
}
|
||||
} // end method doCameraSwitching()
|
||||
|
||||
} // end class
|
||||
|
@ -1,190 +0,0 @@
|
||||
/* Copyright (c) 2019 FIRST. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted (subject to the limitations in the disclaimer below) provided that
|
||||
* the following conditions are met:
|
||||
*
|
||||
* Redistributions of source code must retain the above copyright notice, this list
|
||||
* of conditions and the following disclaimer.
|
||||
*
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
* list of conditions and the following disclaimer in the documentation and/or
|
||||
* other materials provided with the distribution.
|
||||
*
|
||||
* Neither the name of FIRST nor the names of its contributors may be used to endorse or
|
||||
* promote products derived from this software without specific prior written permission.
|
||||
*
|
||||
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS
|
||||
* LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package org.firstinspires.ftc.robotcontroller.external.samples;
|
||||
|
||||
import com.qualcomm.robotcore.eventloop.opmode.Disabled;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
|
||||
import java.util.List;
|
||||
import org.firstinspires.ftc.robotcore.external.ClassFactory;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.WebcamName;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaLocalizer;
|
||||
import org.firstinspires.ftc.robotcore.external.tfod.TFObjectDetector;
|
||||
import org.firstinspires.ftc.robotcore.external.tfod.Recognition;
|
||||
|
||||
/**
|
||||
* This 2022-2023 OpMode illustrates the basics of using the TensorFlow Object Detection API to
|
||||
* determine which image is being presented to the robot.
|
||||
*
|
||||
* Use Android Studio to Copy this Class, and Paste it into your team's code folder with a new name.
|
||||
* Remove or comment out the @Disabled line to add this opmode to the Driver Station OpMode list.
|
||||
*
|
||||
* IMPORTANT: In order to use this OpMode, you need to obtain your own Vuforia license key as
|
||||
* is explained below.
|
||||
*/
|
||||
@TeleOp(name = "Concept: TensorFlow Object Detection Webcam", group = "Concept")
|
||||
@Disabled
|
||||
public class ConceptTensorFlowObjectDetectionWebcam extends LinearOpMode {
|
||||
|
||||
/*
|
||||
* Specify the source for the Tensor Flow Model.
|
||||
* If the TensorFlowLite object model is included in the Robot Controller App as an "asset",
|
||||
* the OpMode must to load it using loadModelFromAsset(). However, if a team generated model
|
||||
* has been downloaded to the Robot Controller's SD FLASH memory, it must to be loaded using loadModelFromFile()
|
||||
* Here we assume it's an Asset. Also see method initTfod() below .
|
||||
*/
|
||||
private static final String TFOD_MODEL_ASSET = "PowerPlay.tflite";
|
||||
// private static final String TFOD_MODEL_FILE = "/sdcard/FIRST/tflitemodels/CustomTeamModel.tflite";
|
||||
|
||||
|
||||
private static final String[] LABELS = {
|
||||
"1 Bolt",
|
||||
"2 Bulb",
|
||||
"3 Panel"
|
||||
};
|
||||
|
||||
/*
|
||||
* IMPORTANT: You need to obtain your own license key to use Vuforia. The string below with which
|
||||
* 'parameters.vuforiaLicenseKey' is initialized is for illustration only, and will not function.
|
||||
* A Vuforia 'Development' license key, can be obtained free of charge from the Vuforia developer
|
||||
* web site at https://developer.vuforia.com/license-manager.
|
||||
*
|
||||
* Vuforia license keys are always 380 characters long, and look as if they contain mostly
|
||||
* random data. As an example, here is a example of a fragment of a valid key:
|
||||
* ... yIgIzTqZ4mWjk9wd3cZO9T1axEqzuhxoGlfOOI2dRzKS4T0hQ8kT ...
|
||||
* Once you've obtained a license key, copy the string from the Vuforia web site
|
||||
* and paste it in to your code on the next line, between the double quotes.
|
||||
*/
|
||||
private static final String VUFORIA_KEY =
|
||||
" -- YOUR NEW VUFORIA KEY GOES HERE --- ";
|
||||
|
||||
/**
|
||||
* {@link #vuforia} is the variable we will use to store our instance of the Vuforia
|
||||
* localization engine.
|
||||
*/
|
||||
private VuforiaLocalizer vuforia;
|
||||
|
||||
/**
|
||||
* {@link #tfod} is the variable we will use to store our instance of the TensorFlow Object
|
||||
* Detection engine.
|
||||
*/
|
||||
private TFObjectDetector tfod;
|
||||
|
||||
@Override
|
||||
public void runOpMode() {
|
||||
// The TFObjectDetector uses the camera frames from the VuforiaLocalizer, so we create that
|
||||
// first.
|
||||
initVuforia();
|
||||
initTfod();
|
||||
|
||||
/**
|
||||
* Activate TensorFlow Object Detection before we wait for the start command.
|
||||
* Do it here so that the Camera Stream window will have the TensorFlow annotations visible.
|
||||
**/
|
||||
if (tfod != null) {
|
||||
tfod.activate();
|
||||
|
||||
// The TensorFlow software will scale the input images from the camera to a lower resolution.
|
||||
// This can result in lower detection accuracy at longer distances (> 55cm or 22").
|
||||
// If your target is at distance greater than 50 cm (20") you can increase the magnification value
|
||||
// to artificially zoom in to the center of image. For best results, the "aspectRatio" argument
|
||||
// should be set to the value of the images used to create the TensorFlow Object Detection model
|
||||
// (typically 16/9).
|
||||
tfod.setZoom(1.0, 16.0/9.0);
|
||||
}
|
||||
|
||||
/** Wait for the game to begin */
|
||||
telemetry.addData(">", "Press Play to start op mode");
|
||||
telemetry.update();
|
||||
waitForStart();
|
||||
|
||||
if (opModeIsActive()) {
|
||||
while (opModeIsActive()) {
|
||||
if (tfod != null) {
|
||||
// getUpdatedRecognitions() will return null if no new information is available since
|
||||
// the last time that call was made.
|
||||
List<Recognition> updatedRecognitions = tfod.getUpdatedRecognitions();
|
||||
if (updatedRecognitions != null) {
|
||||
telemetry.addData("# Objects Detected", updatedRecognitions.size());
|
||||
|
||||
// step through the list of recognitions and display image position/size information for each one
|
||||
// Note: "Image number" refers to the randomized image orientation/number
|
||||
for (Recognition recognition : updatedRecognitions) {
|
||||
double col = (recognition.getLeft() + recognition.getRight()) / 2 ;
|
||||
double row = (recognition.getTop() + recognition.getBottom()) / 2 ;
|
||||
double width = Math.abs(recognition.getRight() - recognition.getLeft()) ;
|
||||
double height = Math.abs(recognition.getTop() - recognition.getBottom()) ;
|
||||
|
||||
telemetry.addData(""," ");
|
||||
telemetry.addData("Image", "%s (%.0f %% Conf.)", recognition.getLabel(), recognition.getConfidence() * 100 );
|
||||
telemetry.addData("- Position (Row/Col)","%.0f / %.0f", row, col);
|
||||
telemetry.addData("- Size (Width/Height)","%.0f / %.0f", width, height);
|
||||
}
|
||||
telemetry.update();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the Vuforia localization engine.
|
||||
*/
|
||||
private void initVuforia() {
|
||||
/*
|
||||
* Configure Vuforia by creating a Parameter object, and passing it to the Vuforia engine.
|
||||
*/
|
||||
VuforiaLocalizer.Parameters parameters = new VuforiaLocalizer.Parameters();
|
||||
|
||||
parameters.vuforiaLicenseKey = VUFORIA_KEY;
|
||||
parameters.cameraName = hardwareMap.get(WebcamName.class, "Webcam 1");
|
||||
|
||||
// Instantiate the Vuforia engine
|
||||
vuforia = ClassFactory.getInstance().createVuforia(parameters);
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the TensorFlow Object Detection engine.
|
||||
*/
|
||||
private void initTfod() {
|
||||
int tfodMonitorViewId = hardwareMap.appContext.getResources().getIdentifier(
|
||||
"tfodMonitorViewId", "id", hardwareMap.appContext.getPackageName());
|
||||
TFObjectDetector.Parameters tfodParameters = new TFObjectDetector.Parameters(tfodMonitorViewId);
|
||||
tfodParameters.minResultConfidence = 0.75f;
|
||||
tfodParameters.isModelTensorFlow2 = true;
|
||||
tfodParameters.inputSize = 300;
|
||||
tfod = ClassFactory.getInstance().createTFObjectDetector(tfodParameters, vuforia);
|
||||
|
||||
// Use loadModelFromAsset() if the TF Model is built in as an asset by Android Studio
|
||||
// Use loadModelFromFile() if you have downloaded a custom team model to the Robot Controller's FLASH.
|
||||
tfod.loadModelFromAsset(TFOD_MODEL_ASSET, LABELS);
|
||||
// tfod.loadModelFromFile(TFOD_MODEL_FILE, LABELS);
|
||||
}
|
||||
}
|
@ -1,186 +0,0 @@
|
||||
/* Copyright (c) 2017 FIRST. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted (subject to the limitations in the disclaimer below) provided that
|
||||
* the following conditions are met:
|
||||
*
|
||||
* Redistributions of source code must retain the above copyright notice, this list
|
||||
* of conditions and the following disclaimer.
|
||||
*
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
* list of conditions and the following disclaimer in the documentation and/or
|
||||
* other materials provided with the distribution.
|
||||
*
|
||||
* Neither the name of FIRST nor the names of its contributors may be used to endorse or
|
||||
* promote products derived from this software without specific prior written permission.
|
||||
*
|
||||
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS
|
||||
* LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
package org.firstinspires.ftc.robotcontroller.external.samples;
|
||||
|
||||
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.Disabled;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
|
||||
|
||||
import org.firstinspires.ftc.robotcore.external.ClassFactory;
|
||||
import org.firstinspires.ftc.robotcore.external.matrices.OpenGLMatrix;
|
||||
import org.firstinspires.ftc.robotcore.external.matrices.VectorF;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.AngleUnit;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.AxesOrder;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.AxesReference;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.Orientation;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.RelicRecoveryVuMark;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuMarkInstanceId;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaLocalizer;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaTrackable;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaTrackableDefaultListener;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaTrackables;
|
||||
|
||||
/**
|
||||
* This OpMode illustrates the basics of using the Vuforia engine to determine
|
||||
* the identity of Vuforia VuMarks encountered on the field. The code is structured as
|
||||
* a LinearOpMode. It shares much structure with {@link ConceptVuforiaFieldNavigation}; we do not here
|
||||
* duplicate the core Vuforia documentation found there, but rather instead focus on the
|
||||
* differences between the use of Vuforia for navigation vs VuMark identification.
|
||||
*
|
||||
* @see ConceptVuforiaFieldNavigation
|
||||
* @see VuforiaLocalizer
|
||||
* @see VuforiaTrackableDefaultListener
|
||||
* see ftc_app/doc/tutorial/FTC_FieldCoordinateSystemDefinition.pdf
|
||||
*
|
||||
* Use Android Studio to Copy this Class, and Paste it into your team's code folder with a new name.
|
||||
* Remove or comment out the @Disabled line to add this OpMode to the Driver Station OpMode list.
|
||||
*
|
||||
* IMPORTANT: In order to use this OpMode, you need to obtain your own Vuforia license key as
|
||||
* is explained below.
|
||||
*/
|
||||
|
||||
@TeleOp(name="Concept: VuMark Id", group ="Concept")
|
||||
@Disabled
|
||||
public class ConceptVuMarkIdentification extends LinearOpMode {
|
||||
|
||||
public static final String TAG = "Vuforia VuMark Sample";
|
||||
|
||||
OpenGLMatrix lastLocation = null;
|
||||
|
||||
/**
|
||||
* {@link #vuforia} is the variable we will use to store our instance of the Vuforia
|
||||
* localization engine.
|
||||
*/
|
||||
VuforiaLocalizer vuforia;
|
||||
|
||||
@Override public void runOpMode() {
|
||||
|
||||
/*
|
||||
* To start up Vuforia, tell it the view that we wish to use for camera monitor (on the RC phone);
|
||||
*/
|
||||
int cameraMonitorViewId = hardwareMap.appContext.getResources().getIdentifier("cameraMonitorViewId", "id", hardwareMap.appContext.getPackageName());
|
||||
VuforiaLocalizer.Parameters parameters = new VuforiaLocalizer.Parameters(cameraMonitorViewId);
|
||||
|
||||
// OR... Do Not Activate the Camera Monitor View, to save power
|
||||
// VuforiaLocalizer.Parameters parameters = new VuforiaLocalizer.Parameters();
|
||||
|
||||
/*
|
||||
* IMPORTANT: You need to obtain your own license key to use Vuforia. The string below with which
|
||||
* 'parameters.vuforiaLicenseKey' is initialized is for illustration only, and will not function.
|
||||
* A Vuforia 'Development' license key, can be obtained free of charge from the Vuforia developer
|
||||
* web site at https://developer.vuforia.com/license-manager.
|
||||
*
|
||||
* Vuforia license keys are always 380 characters long, and look as if they contain mostly
|
||||
* random data. As an example, here is a example of a fragment of a valid key:
|
||||
* ... yIgIzTqZ4mWjk9wd3cZO9T1axEqzuhxoGlfOOI2dRzKS4T0hQ8kT ...
|
||||
* Once you've obtained a license key, copy the string from the Vuforia web site
|
||||
* and paste it in to your code on the next line, between the double quotes.
|
||||
*/
|
||||
parameters.vuforiaLicenseKey = " -- YOUR NEW VUFORIA KEY GOES HERE --- ";
|
||||
|
||||
/*
|
||||
* We also indicate which camera on the RC that we wish to use.
|
||||
* Here we chose the back (HiRes) camera (for greater range), but
|
||||
* for a competition robot, the front camera might be more convenient.
|
||||
*/
|
||||
parameters.cameraDirection = VuforiaLocalizer.CameraDirection.BACK;
|
||||
|
||||
/**
|
||||
* Instantiate the Vuforia engine
|
||||
*/
|
||||
vuforia = ClassFactory.getInstance().createVuforia(parameters);
|
||||
|
||||
|
||||
/**
|
||||
* Load the data set containing the VuMarks for Relic Recovery. There's only one trackable
|
||||
* in this data set: all three of the VuMarks in the game were created from this one template,
|
||||
* but differ in their instance id information.
|
||||
* @see VuMarkInstanceId
|
||||
*/
|
||||
VuforiaTrackables relicTrackables = this.vuforia.loadTrackablesFromAsset("RelicVuMark");
|
||||
VuforiaTrackable relicTemplate = relicTrackables.get(0);
|
||||
relicTemplate.setName("relicVuMarkTemplate"); // can help in debugging; otherwise not necessary
|
||||
|
||||
telemetry.addData(">", "Press Play to start");
|
||||
telemetry.update();
|
||||
waitForStart();
|
||||
|
||||
relicTrackables.activate();
|
||||
|
||||
while (opModeIsActive()) {
|
||||
|
||||
/**
|
||||
* See if any of the instances of {@link relicTemplate} are currently visible.
|
||||
* {@link RelicRecoveryVuMark} is an enum which can have the following values:
|
||||
* UNKNOWN, LEFT, CENTER, and RIGHT. When a VuMark is visible, something other than
|
||||
* UNKNOWN will be returned by {@link RelicRecoveryVuMark#from(VuforiaTrackable)}.
|
||||
*/
|
||||
RelicRecoveryVuMark vuMark = RelicRecoveryVuMark.from(relicTemplate);
|
||||
if (vuMark != RelicRecoveryVuMark.UNKNOWN) {
|
||||
|
||||
/* Found an instance of the template. In the actual game, you will probably
|
||||
* loop until this condition occurs, then move on to act accordingly depending
|
||||
* on which VuMark was visible. */
|
||||
telemetry.addData("VuMark", "%s visible", vuMark);
|
||||
|
||||
/* For fun, we also exhibit the navigational pose. In the Relic Recovery game,
|
||||
* it is perhaps unlikely that you will actually need to act on this pose information, but
|
||||
* we illustrate it nevertheless, for completeness. */
|
||||
OpenGLMatrix pose = ((VuforiaTrackableDefaultListener)relicTemplate.getListener()).getPose();
|
||||
telemetry.addData("Pose", format(pose));
|
||||
|
||||
/* We further illustrate how to decompose the pose into useful rotational and
|
||||
* translational components */
|
||||
if (pose != null) {
|
||||
VectorF trans = pose.getTranslation();
|
||||
Orientation rot = Orientation.getOrientation(pose, AxesReference.EXTRINSIC, AxesOrder.XYZ, AngleUnit.DEGREES);
|
||||
|
||||
// Extract the X, Y, and Z components of the offset of the target relative to the robot
|
||||
double tX = trans.get(0);
|
||||
double tY = trans.get(1);
|
||||
double tZ = trans.get(2);
|
||||
|
||||
// Extract the rotational components of the target relative to the robot
|
||||
double rX = rot.firstAngle;
|
||||
double rY = rot.secondAngle;
|
||||
double rZ = rot.thirdAngle;
|
||||
}
|
||||
}
|
||||
else {
|
||||
telemetry.addData("VuMark", "not visible");
|
||||
}
|
||||
|
||||
telemetry.update();
|
||||
}
|
||||
}
|
||||
|
||||
String format(OpenGLMatrix transformationMatrix) {
|
||||
return (transformationMatrix != null) ? transformationMatrix.formatAsTransform() : "null";
|
||||
}
|
||||
}
|
@ -1,194 +0,0 @@
|
||||
/* Copyright (c) 2017 FIRST. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted (subject to the limitations in the disclaimer below) provided that
|
||||
* the following conditions are met:
|
||||
*
|
||||
* Redistributions of source code must retain the above copyright notice, this list
|
||||
* of conditions and the following disclaimer.
|
||||
*
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
* list of conditions and the following disclaimer in the documentation and/or
|
||||
* other materials provided with the distribution.
|
||||
*
|
||||
* Neither the name of FIRST nor the names of its contributors may be used to endorse or
|
||||
* promote products derived from this software without specific prior written permission.
|
||||
*
|
||||
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS
|
||||
* LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
package org.firstinspires.ftc.robotcontroller.external.samples;
|
||||
|
||||
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.Disabled;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
|
||||
|
||||
import org.firstinspires.ftc.robotcore.external.ClassFactory;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.WebcamName;
|
||||
import org.firstinspires.ftc.robotcore.external.matrices.OpenGLMatrix;
|
||||
import org.firstinspires.ftc.robotcore.external.matrices.VectorF;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.AngleUnit;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.AxesOrder;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.AxesReference;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.Orientation;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.RelicRecoveryVuMark;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuMarkInstanceId;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaLocalizer;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaTrackable;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaTrackableDefaultListener;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaTrackables;
|
||||
|
||||
/**
|
||||
* This OpMode illustrates the basics of using the Vuforia engine to determine
|
||||
* the identity of Vuforia VuMarks encountered on the field. The code is structured as
|
||||
* a LinearOpMode. It shares much structure with {@link ConceptVuforiaFieldNavigationWebcam}; we do not here
|
||||
* duplicate the core Vuforia documentation found there, but rather instead focus on the
|
||||
* differences between the use of Vuforia for navigation vs VuMark identification.
|
||||
*
|
||||
* @see ConceptVuforiaFieldNavigationWebcam
|
||||
* @see VuforiaLocalizer
|
||||
* @see VuforiaTrackableDefaultListener
|
||||
* see ftc_app/doc/tutorial/FTC_FieldCoordinateSystemDefinition.pdf
|
||||
*
|
||||
* Use Android Studio to Copy this Class, and Paste it into your team's code folder with a new name.
|
||||
* Remove or comment out the @Disabled line to add this OpMode to the Driver Station OpMode list.
|
||||
*
|
||||
* IMPORTANT: In order to use this OpMode, you need to obtain your own Vuforia license key as
|
||||
* is explained below
|
||||
*/
|
||||
|
||||
@TeleOp(name="Concept: VuMark Id Webcam", group ="Concept")
|
||||
@Disabled
|
||||
public class ConceptVuMarkIdentificationWebcam extends LinearOpMode {
|
||||
|
||||
public static final String TAG = "Vuforia VuMark Sample";
|
||||
|
||||
OpenGLMatrix lastLocation = null;
|
||||
|
||||
/**
|
||||
* {@link #vuforia} is the variable we will use to store our instance of the Vuforia
|
||||
* localization engine.
|
||||
*/
|
||||
VuforiaLocalizer vuforia;
|
||||
|
||||
/**
|
||||
* This is the webcam we are to use. As with other hardware devices such as motors and
|
||||
* servos, this device is identified using the robot configuration tool in the FTC application.
|
||||
*/
|
||||
WebcamName webcamName;
|
||||
|
||||
@Override public void runOpMode() {
|
||||
|
||||
/*
|
||||
* Retrieve the camera we are to use.
|
||||
*/
|
||||
webcamName = hardwareMap.get(WebcamName.class, "Webcam 1");
|
||||
|
||||
/*
|
||||
* To start up Vuforia, tell it the view that we wish to use for camera monitor (on the RC phone);
|
||||
* If no camera monitor is desired, use the parameterless constructor instead (commented out below).
|
||||
*/
|
||||
int cameraMonitorViewId = hardwareMap.appContext.getResources().getIdentifier("cameraMonitorViewId", "id", hardwareMap.appContext.getPackageName());
|
||||
VuforiaLocalizer.Parameters parameters = new VuforiaLocalizer.Parameters(cameraMonitorViewId);
|
||||
|
||||
// OR... Do Not Activate the Camera Monitor View, to save power
|
||||
// VuforiaLocalizer.Parameters parameters = new VuforiaLocalizer.Parameters();
|
||||
|
||||
/*
|
||||
* IMPORTANT: You need to obtain your own license key to use Vuforia. The string below with which
|
||||
* 'parameters.vuforiaLicenseKey' is initialized is for illustration only, and will not function.
|
||||
* A Vuforia 'Development' license key, can be obtained free of charge from the Vuforia developer
|
||||
* web site at https://developer.vuforia.com/license-manager.
|
||||
*
|
||||
* Vuforia license keys are always 380 characters long, and look as if they contain mostly
|
||||
* random data. As an example, here is a example of a fragment of a valid key:
|
||||
* ... yIgIzTqZ4mWjk9wd3cZO9T1axEqzuhxoGlfOOI2dRzKS4T0hQ8kT ...
|
||||
* Once you've obtained a license key, copy the string from the Vuforia web site
|
||||
* and paste it in to your code on the next line, between the double quotes.
|
||||
*/
|
||||
parameters.vuforiaLicenseKey = " -- YOUR NEW VUFORIA KEY GOES HERE --- ";
|
||||
|
||||
|
||||
/**
|
||||
* We also indicate which camera on the RC we wish to use. For pedagogical purposes,
|
||||
* we use the same logic as in {@link ConceptVuforiaNavigationWebcam}.
|
||||
*/
|
||||
parameters.cameraName = webcamName;
|
||||
this.vuforia = ClassFactory.getInstance().createVuforia(parameters);
|
||||
|
||||
/**
|
||||
* Load the data set containing the VuMarks for Relic Recovery. There's only one trackable
|
||||
* in this data set: all three of the VuMarks in the game were created from this one template,
|
||||
* but differ in their instance id information.
|
||||
* @see VuMarkInstanceId
|
||||
*/
|
||||
VuforiaTrackables relicTrackables = this.vuforia.loadTrackablesFromAsset("RelicVuMark");
|
||||
VuforiaTrackable relicTemplate = relicTrackables.get(0);
|
||||
relicTemplate.setName("relicVuMarkTemplate"); // can help in debugging; otherwise not necessary
|
||||
|
||||
telemetry.addData(">", "Press Play to start");
|
||||
telemetry.update();
|
||||
waitForStart();
|
||||
|
||||
relicTrackables.activate();
|
||||
|
||||
while (opModeIsActive()) {
|
||||
|
||||
/**
|
||||
* See if any of the instances of {@link relicTemplate} are currently visible.
|
||||
* {@link RelicRecoveryVuMark} is an enum which can have the following values:
|
||||
* UNKNOWN, LEFT, CENTER, and RIGHT. When a VuMark is visible, something other than
|
||||
* UNKNOWN will be returned by {@link RelicRecoveryVuMark#from(VuforiaTrackable)}.
|
||||
*/
|
||||
RelicRecoveryVuMark vuMark = RelicRecoveryVuMark.from(relicTemplate);
|
||||
if (vuMark != RelicRecoveryVuMark.UNKNOWN) {
|
||||
|
||||
/* Found an instance of the template. In the actual game, you will probably
|
||||
* loop until this condition occurs, then move on to act accordingly depending
|
||||
* on which VuMark was visible. */
|
||||
telemetry.addData("VuMark", "%s visible", vuMark);
|
||||
|
||||
/* For fun, we also exhibit the navigational pose. In the Relic Recovery game,
|
||||
* it is perhaps unlikely that you will actually need to act on this pose information, but
|
||||
* we illustrate it nevertheless, for completeness. */
|
||||
OpenGLMatrix pose = ((VuforiaTrackableDefaultListener)relicTemplate.getListener()).getFtcCameraFromTarget();
|
||||
telemetry.addData("Pose", format(pose));
|
||||
|
||||
/* We further illustrate how to decompose the pose into useful rotational and
|
||||
* translational components */
|
||||
if (pose != null) {
|
||||
VectorF trans = pose.getTranslation();
|
||||
Orientation rot = Orientation.getOrientation(pose, AxesReference.EXTRINSIC, AxesOrder.XYZ, AngleUnit.DEGREES);
|
||||
|
||||
// Extract the X, Y, and Z components of the offset of the target relative to the robot
|
||||
double tX = trans.get(0);
|
||||
double tY = trans.get(1);
|
||||
double tZ = trans.get(2);
|
||||
|
||||
// Extract the rotational components of the target relative to the robot
|
||||
double rX = rot.firstAngle;
|
||||
double rY = rot.secondAngle;
|
||||
double rZ = rot.thirdAngle;
|
||||
}
|
||||
}
|
||||
else {
|
||||
telemetry.addData("VuMark", "not visible");
|
||||
}
|
||||
|
||||
telemetry.update();
|
||||
}
|
||||
}
|
||||
|
||||
String format(OpenGLMatrix transformationMatrix) {
|
||||
return (transformationMatrix != null) ? transformationMatrix.formatAsTransform() : "null";
|
||||
}
|
||||
}
|
@ -1,203 +0,0 @@
|
||||
package org.firstinspires.ftc.robotcontroller.external.samples;
|
||||
|
||||
import com.qualcomm.robotcore.eventloop.opmode.Disabled;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
|
||||
import com.qualcomm.robotcore.hardware.DcMotor;
|
||||
import com.qualcomm.robotcore.util.Range;
|
||||
|
||||
import org.firstinspires.ftc.robotcore.external.ClassFactory;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.WebcamName;
|
||||
import org.firstinspires.ftc.robotcore.external.matrices.OpenGLMatrix;
|
||||
import org.firstinspires.ftc.robotcore.external.matrices.VectorF;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaLocalizer;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaTrackable;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaTrackableDefaultListener;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaTrackables;
|
||||
|
||||
/**
|
||||
* This OpMode illustrates using a webcam to locate and drive towards ANY Vuforia target.
|
||||
* The code assumes a basic two-wheel Robot Configuration with motors named left_drive and right_drive.
|
||||
* The motor directions must be set so a positive drive goes forward and a positive turn rotates to the right.
|
||||
*
|
||||
* Under manual control, the left stick will move forward/back, and the right stick will turn left/right.
|
||||
* This is called POV Joystick mode, different than Tank Drive (where each joystick controls a wheel).
|
||||
* Manually drive the robot until it displays Target data on the Driver Station.
|
||||
* Press and hold the *Left Bumper* to enable the automatic "Drive to target" mode.
|
||||
* Release the Left Bumper to return to manual driving mode.
|
||||
*
|
||||
* Use DESIRED_DISTANCE to set how close you want the robot to get to the target.
|
||||
* Speed and Turn sensitivity can be adjusted using the SPEED_GAIN and TURN_GAIN constants.
|
||||
*
|
||||
* For more Vuforia details, or to adapt this OpMode for a phone camera, view the
|
||||
* ConceptVuforiaFieldNavigation and ConceptVuforiaFieldNavigationWebcam samples.
|
||||
*
|
||||
* Use Android Studio to Copy this Class, and Paste it into your team's code folder with a new name.
|
||||
* Remove or comment out the @Disabled line to add this opmode to the Driver Station OpMode list.
|
||||
*
|
||||
* IMPORTANT: In order to use this OpMode, you need to obtain your own Vuforia license key as
|
||||
* is explained below.
|
||||
*/
|
||||
|
||||
@TeleOp(name="Drive To Target", group = "Concept")
|
||||
@Disabled
|
||||
public class ConceptVuforiaDriveToTargetWebcam extends LinearOpMode
|
||||
{
|
||||
// Adjust these numbers to suit your robot.
|
||||
final double DESIRED_DISTANCE = 8.0; // this is how close the camera should get to the target (inches)
|
||||
// The GAIN constants set the relationship between the measured position error,
|
||||
// and how much power is applied to the drive motors. Drive = Error * Gain
|
||||
// Make these values smaller for smoother control.
|
||||
final double SPEED_GAIN = 0.02 ; // Speed Control "Gain". eg: Ramp up to 50% power at a 25 inch error. (0.50 / 25.0)
|
||||
final double TURN_GAIN = 0.01 ; // Turn Control "Gain". eg: Ramp up to 25% power at a 25 degree error. (0.25 / 25.0)
|
||||
|
||||
final double MM_PER_INCH = 25.40 ; // Metric conversion
|
||||
|
||||
/*
|
||||
* IMPORTANT: You need to obtain your own license key to use Vuforia. The string below with which
|
||||
* 'parameters.vuforiaLicenseKey' is initialized is for illustration only, and will not function.
|
||||
* A Vuforia 'Development' license key, can be obtained free of charge from the Vuforia developer
|
||||
* web site at https://developer.vuforia.com/license-manager.
|
||||
*
|
||||
* Vuforia license keys are always 380 characters long, and look as if they contain mostly
|
||||
* random data. As an example, here is a example of a fragment of a valid key:
|
||||
* ... yIgIzTqZ4mWjk9wd3cZO9T1axEqzuhxoGlfOOI2dRzKS4T0hQ8kT ...
|
||||
* Once you've obtained a license key, copy the string from the Vuforia web site
|
||||
* and paste it in to your code on the next line, between the double quotes.
|
||||
*/
|
||||
private static final String VUFORIA_KEY =
|
||||
" --- YOUR NEW VUFORIA KEY GOES HERE --- ";
|
||||
|
||||
VuforiaLocalizer vuforia = null;
|
||||
OpenGLMatrix targetPose = null;
|
||||
String targetName = "";
|
||||
|
||||
private DcMotor leftDrive = null;
|
||||
private DcMotor rightDrive = null;
|
||||
|
||||
@Override public void runOpMode()
|
||||
{
|
||||
/*
|
||||
* Configure Vuforia by creating a Parameter object, and passing it to the Vuforia engine.
|
||||
* To get an on-phone camera preview, use the code below.
|
||||
* If no camera preview is desired, use the parameter-less constructor instead (commented out below).
|
||||
*/
|
||||
int cameraMonitorViewId = hardwareMap.appContext.getResources().getIdentifier("cameraMonitorViewId", "id", hardwareMap.appContext.getPackageName());
|
||||
VuforiaLocalizer.Parameters parameters = new VuforiaLocalizer.Parameters(cameraMonitorViewId);
|
||||
// VuforiaLocalizer.Parameters parameters = new VuforiaLocalizer.Parameters();
|
||||
|
||||
parameters.vuforiaLicenseKey = VUFORIA_KEY;
|
||||
|
||||
// Turn off Extended tracking. Set this true if you want Vuforia to track beyond the target.
|
||||
parameters.useExtendedTracking = false;
|
||||
|
||||
// Connect to the camera we are to use. This name must match what is set up in Robot Configuration
|
||||
parameters.cameraName = hardwareMap.get(WebcamName.class, "Webcam 1");
|
||||
this.vuforia = ClassFactory.getInstance().createVuforia(parameters);
|
||||
|
||||
// Load the trackable objects from the Assets file, and give them meaningful names
|
||||
VuforiaTrackables targetsPowerPlay = this.vuforia.loadTrackablesFromAsset("PowerPlay");
|
||||
targetsPowerPlay.get(0).setName("Red Audience Wall");
|
||||
targetsPowerPlay.get(1).setName("Red Rear Wall");
|
||||
targetsPowerPlay.get(2).setName("Blue Audience Wall");
|
||||
targetsPowerPlay.get(3).setName("Blue Rear Wall");
|
||||
|
||||
// Start tracking targets in the background
|
||||
targetsPowerPlay.activate();
|
||||
|
||||
// Initialize the hardware variables. Note that the strings used here as parameters
|
||||
// to 'get' must correspond to the names assigned during the robot configuration
|
||||
// step (using the FTC Robot Controller app on the phone).
|
||||
leftDrive = hardwareMap.get(DcMotor.class, "left_drive");
|
||||
rightDrive = hardwareMap.get(DcMotor.class, "right_drive");
|
||||
|
||||
// To drive forward, most robots need the motor on one side to be reversed, because the axles point in opposite directions.
|
||||
// When run, this OpMode should start both motors driving forward. So adjust these two lines based on your first test drive.
|
||||
// Note: The settings here assume direct drive on left and right wheels. Gear Reduction or 90 Deg drives may require direction flips
|
||||
leftDrive.setDirection(DcMotor.Direction.REVERSE);
|
||||
rightDrive.setDirection(DcMotor.Direction.FORWARD);
|
||||
|
||||
telemetry.addData(">", "Press Play to start");
|
||||
telemetry.update();
|
||||
|
||||
waitForStart();
|
||||
|
||||
boolean targetFound = false; // Set to true when a target is detected by Vuforia
|
||||
double targetRange = 0; // Distance from camera to target in Inches
|
||||
double targetBearing = 0; // Robot Heading, relative to target. Positive degrees means target is to the right.
|
||||
double drive = 0; // Desired forward power (-1 to +1)
|
||||
double turn = 0; // Desired turning power (-1 to +1)
|
||||
|
||||
while (opModeIsActive())
|
||||
{
|
||||
// Look for first visible target, and save its pose.
|
||||
targetFound = false;
|
||||
for (VuforiaTrackable trackable : targetsPowerPlay)
|
||||
{
|
||||
if (((VuforiaTrackableDefaultListener) trackable.getListener()).isVisible())
|
||||
{
|
||||
targetPose = ((VuforiaTrackableDefaultListener)trackable.getListener()).getVuforiaCameraFromTarget();
|
||||
|
||||
// if we have a target, process the "pose" to determine the position of the target relative to the robot.
|
||||
if (targetPose != null)
|
||||
{
|
||||
targetFound = true;
|
||||
targetName = trackable.getName();
|
||||
VectorF trans = targetPose.getTranslation();
|
||||
|
||||
// Extract the X & Y components of the offset of the target relative to the robot
|
||||
double targetX = trans.get(0) / MM_PER_INCH; // Image X axis
|
||||
double targetY = trans.get(2) / MM_PER_INCH; // Image Z axis
|
||||
|
||||
// target range is based on distance from robot position to origin (right triangle).
|
||||
targetRange = Math.hypot(targetX, targetY);
|
||||
|
||||
// target bearing is based on angle formed between the X axis and the target range line
|
||||
targetBearing = Math.toDegrees(Math.asin(targetX / targetRange));
|
||||
|
||||
break; // jump out of target tracking loop if we find a target.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tell the driver what we see, and what to do.
|
||||
if (targetFound) {
|
||||
telemetry.addData(">","HOLD Left-Bumper to Drive to Target\n");
|
||||
telemetry.addData("Target", " %s", targetName);
|
||||
telemetry.addData("Range", "%5.1f inches", targetRange);
|
||||
telemetry.addData("Bearing","%3.0f degrees", targetBearing);
|
||||
} else {
|
||||
telemetry.addData(">","Drive using joystick to find target\n");
|
||||
}
|
||||
|
||||
// Drive to target Automatically if Left Bumper is being pressed, AND we have found a target.
|
||||
if (gamepad1.left_bumper && targetFound) {
|
||||
|
||||
// Determine heading and range error so we can use them to control the robot automatically.
|
||||
double rangeError = (targetRange - DESIRED_DISTANCE);
|
||||
double headingError = targetBearing;
|
||||
|
||||
// Use the speed and turn "gains" to calculate how we want the robot to move.
|
||||
drive = rangeError * SPEED_GAIN;
|
||||
turn = headingError * TURN_GAIN ;
|
||||
|
||||
telemetry.addData("Auto","Drive %5.2f, Turn %5.2f", drive, turn);
|
||||
} else {
|
||||
|
||||
// drive using manual POV Joystick mode.
|
||||
drive = -gamepad1.left_stick_y / 2.0; // Reduce drive rate to 50%.
|
||||
turn = gamepad1.right_stick_x / 4.0; // Reduce turn rate to 25%.
|
||||
telemetry.addData("Manual","Drive %5.2f, Turn %5.2f", drive, turn);
|
||||
}
|
||||
telemetry.update();
|
||||
|
||||
// Calculate left and right wheel powers and send to them to the motors.
|
||||
double leftPower = Range.clip(drive + turn, -1.0, 1.0) ;
|
||||
double rightPower = Range.clip(drive - turn, -1.0, 1.0) ;
|
||||
leftDrive.setPower(leftPower);
|
||||
rightDrive.setPower(rightPower);
|
||||
|
||||
sleep(10);
|
||||
}
|
||||
}
|
||||
}
|
@ -1,283 +0,0 @@
|
||||
/* Copyright (c) 2019 FIRST. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted (subject to the limitations in the disclaimer below) provided that
|
||||
* the following conditions are met:
|
||||
*
|
||||
* Redistributions of source code must retain the above copyright notice, this list
|
||||
* of conditions and the following disclaimer.
|
||||
*
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
* list of conditions and the following disclaimer in the documentation and/or
|
||||
* other materials provided with the distribution.
|
||||
*
|
||||
* Neither the name of FIRST nor the names of its contributors may be used to endorse or
|
||||
* promote products derived from this software without specific prior written permission.
|
||||
*
|
||||
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS
|
||||
* LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package org.firstinspires.ftc.robotcontroller.external.samples;
|
||||
|
||||
import com.qualcomm.robotcore.eventloop.opmode.Disabled;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
|
||||
|
||||
import org.firstinspires.ftc.robotcore.external.ClassFactory;
|
||||
import org.firstinspires.ftc.robotcore.external.matrices.OpenGLMatrix;
|
||||
import org.firstinspires.ftc.robotcore.external.matrices.VectorF;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.Orientation;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaLocalizer;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaTrackable;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaTrackableDefaultListener;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaTrackables;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import static org.firstinspires.ftc.robotcore.external.navigation.AngleUnit.DEGREES;
|
||||
import static org.firstinspires.ftc.robotcore.external.navigation.AxesOrder.XYZ;
|
||||
import static org.firstinspires.ftc.robotcore.external.navigation.AxesOrder.YZX;
|
||||
import static org.firstinspires.ftc.robotcore.external.navigation.AxesReference.EXTRINSIC;
|
||||
import static org.firstinspires.ftc.robotcore.external.navigation.VuforiaLocalizer.CameraDirection.BACK;
|
||||
|
||||
/**
|
||||
* This OpMode illustrates using the Vuforia localizer to determine positioning and orientation of
|
||||
* robot on the FTC field using the RC phone's camera. The code is structured as a LinearOpMode
|
||||
*
|
||||
* Note: If you are using a WEBCAM see ConceptVuforiaFieldNavigationWebcam.java
|
||||
*
|
||||
* When images are located, Vuforia is able to determine the position and orientation of the
|
||||
* image relative to the camera. This sample code then combines that information with a
|
||||
* knowledge of where the target images are on the field, to determine the location of the camera.
|
||||
*
|
||||
* Finally, the location of the camera on the robot is used to determine the
|
||||
* robot's location and orientation on the field.
|
||||
*
|
||||
* To learn more about the FTC field coordinate model, see FTC_FieldCoordinateSystemDefinition.pdf in this folder
|
||||
*
|
||||
* Use Android Studio to Copy this Class, and Paste it into your team's code folder with a new name.
|
||||
* Remove or comment out the @Disabled line to add this OpMode to the Driver Station OpMode list.
|
||||
*
|
||||
* IMPORTANT: In order to use this OpMode, you need to obtain your own Vuforia license key as
|
||||
* is explained below.
|
||||
*/
|
||||
|
||||
@TeleOp(name="Vuforia Field Nav", group ="Concept")
|
||||
@Disabled
|
||||
public class ConceptVuforiaFieldNavigation extends LinearOpMode {
|
||||
|
||||
// IMPORTANT: For Phone Camera, set 1) the camera source and 2) the orientation, based on how your phone is mounted:
|
||||
// 1) Camera Source. Valid choices are: BACK (behind screen) or FRONT (selfie side)
|
||||
// 2) Phone Orientation. Choices are: PHONE_IS_PORTRAIT = true (portrait) or PHONE_IS_PORTRAIT = false (landscape)
|
||||
|
||||
private static final VuforiaLocalizer.CameraDirection CAMERA_CHOICE = BACK;
|
||||
private static final boolean PHONE_IS_PORTRAIT = false ;
|
||||
|
||||
/*
|
||||
* IMPORTANT: You need to obtain your own license key to use Vuforia. The string below with which
|
||||
* 'parameters.vuforiaLicenseKey' is initialized is for illustration only, and will not function.
|
||||
* A Vuforia 'Development' license key, can be obtained free of charge from the Vuforia developer
|
||||
* web site at https://developer.vuforia.com/license-manager.
|
||||
*
|
||||
* Vuforia license keys are always 380 characters long, and look as if they contain mostly
|
||||
* random data. As an example, here is a example of a fragment of a valid key:
|
||||
* ... yIgIzTqZ4mWjk9wd3cZO9T1axEqzuhxoGlfOOI2dRzKS4T0hQ8kT ...
|
||||
* Once you've obtained a license key, copy the string from the Vuforia web site
|
||||
* and paste it in to your code on the next line, between the double quotes.
|
||||
*/
|
||||
private static final String VUFORIA_KEY =
|
||||
" -- YOUR NEW VUFORIA KEY GOES HERE --- ";
|
||||
|
||||
// Since ImageTarget trackables use mm to specifiy their dimensions, we must use mm for all the physical dimension.
|
||||
// We will define some constants and conversions here. These are useful for the FTC competition field.
|
||||
private static final float mmPerInch = 25.4f;
|
||||
private static final float mmTargetHeight = 6 * mmPerInch; // the height of the center of the target image above the floor
|
||||
private static final float halfField = 72 * mmPerInch;
|
||||
private static final float halfTile = 12 * mmPerInch;
|
||||
private static final float oneAndHalfTile = 36 * mmPerInch;
|
||||
|
||||
// Class Members
|
||||
private OpenGLMatrix lastLocation = null;
|
||||
private VuforiaLocalizer vuforia = null;
|
||||
private VuforiaTrackables targets = null ;
|
||||
|
||||
private boolean targetVisible = false;
|
||||
private float phoneXRotate = 0;
|
||||
private float phoneYRotate = 0;
|
||||
private float phoneZRotate = 0;
|
||||
|
||||
@Override public void runOpMode() {
|
||||
/*
|
||||
* Configure Vuforia by creating a Parameter object, and passing it to the Vuforia engine.
|
||||
* To get an on-phone camera preview, use the code below.
|
||||
* If no camera preview is desired, use the parameter-less constructor instead (commented out below).
|
||||
*/
|
||||
int cameraMonitorViewId = hardwareMap.appContext.getResources().getIdentifier("cameraMonitorViewId", "id", hardwareMap.appContext.getPackageName());
|
||||
VuforiaLocalizer.Parameters parameters = new VuforiaLocalizer.Parameters(cameraMonitorViewId);
|
||||
// VuforiaLocalizer.Parameters parameters = new VuforiaLocalizer.Parameters();
|
||||
|
||||
parameters.vuforiaLicenseKey = VUFORIA_KEY;
|
||||
parameters.cameraDirection = CAMERA_CHOICE;
|
||||
|
||||
// Turn off Extended tracking. Set this true if you want Vuforia to track beyond the target.
|
||||
parameters.useExtendedTracking = false;
|
||||
|
||||
// Instantiate the Vuforia engine
|
||||
vuforia = ClassFactory.getInstance().createVuforia(parameters);
|
||||
|
||||
// Load the trackable assets.
|
||||
targets = this.vuforia.loadTrackablesFromAsset("PowerPlay");
|
||||
|
||||
// For convenience, gather together all the trackable objects in one easily-iterable collection */
|
||||
List<VuforiaTrackable> allTrackables = new ArrayList<VuforiaTrackable>();
|
||||
allTrackables.addAll(targets);
|
||||
|
||||
/**
|
||||
* In order for localization to work, we need to tell the system where each target is on the field, and
|
||||
* where the phone resides on the robot. These specifications are in the form of <em>transformation matrices.</em>
|
||||
* Transformation matrices are a central, important concept in the math here involved in localization.
|
||||
* See <a href="https://en.wikipedia.org/wiki/Transformation_matrix">Transformation Matrix</a>
|
||||
* for detailed information. Commonly, you'll encounter transformation matrices as instances
|
||||
* of the {@link OpenGLMatrix} class.
|
||||
*
|
||||
* If you are standing in the Red Alliance Station looking towards the center of the field,
|
||||
* - The X axis runs from your left to the right. (positive from the center to the right)
|
||||
* - The Y axis runs from the Red Alliance Station towards the other side of the field
|
||||
* where the Blue Alliance Station is. (Positive is from the center, towards the BlueAlliance station)
|
||||
* - The Z axis runs from the floor, upwards towards the ceiling. (Positive is above the floor)
|
||||
*
|
||||
* Before being transformed, each target image is conceptually located at the origin of the field's
|
||||
* coordinate system (the center of the field), facing up.
|
||||
*/
|
||||
|
||||
// Name and locate each trackable object
|
||||
identifyTarget(0, "Red Audience Wall", -halfField, -oneAndHalfTile, mmTargetHeight, 90, 0, 90);
|
||||
identifyTarget(1, "Red Rear Wall", halfField, -oneAndHalfTile, mmTargetHeight, 90, 0, -90);
|
||||
identifyTarget(2, "Blue Audience Wall", -halfField, oneAndHalfTile, mmTargetHeight, 90, 0, 90);
|
||||
identifyTarget(3, "Blue Rear Wall", halfField, oneAndHalfTile, mmTargetHeight, 90, 0, -90);
|
||||
|
||||
/*
|
||||
* Create a transformation matrix describing where the phone is on the robot.
|
||||
*
|
||||
* NOTE !!!! It's very important that you turn OFF your phone's Auto-Screen-Rotation option.
|
||||
* Lock it into Portrait for these numbers to work.
|
||||
*
|
||||
* Info: The coordinate frame for the robot looks the same as the field.
|
||||
* The robot's "forward" direction is facing out along X axis, with the LEFT side facing out along the Y axis.
|
||||
* Z is UP on the robot. This equates to a heading angle of Zero degrees.
|
||||
*
|
||||
* The phone starts out lying flat, with the screen facing Up and with the physical top of the phone
|
||||
* pointing to the LEFT side of the Robot.
|
||||
* The two examples below assume that the camera is facing forward out the front of the robot.
|
||||
*/
|
||||
|
||||
// We need to rotate the camera around its long axis to bring the correct camera forward.
|
||||
if (CAMERA_CHOICE == BACK) {
|
||||
phoneYRotate = -90;
|
||||
} else {
|
||||
phoneYRotate = 90;
|
||||
}
|
||||
|
||||
// Rotate the phone vertical about the X axis if it's in portrait mode
|
||||
if (PHONE_IS_PORTRAIT) {
|
||||
phoneXRotate = 90 ;
|
||||
}
|
||||
|
||||
// Next, translate the camera lens to where it is on the robot.
|
||||
// In this example, it is centered on the robot (left-to-right and front-to-back), and 6 inches above ground level.
|
||||
final float CAMERA_FORWARD_DISPLACEMENT = 0.0f * mmPerInch; // eg: Enter the forward distance from the center of the robot to the camera lens
|
||||
final float CAMERA_VERTICAL_DISPLACEMENT = 6.0f * mmPerInch; // eg: Camera is 6 Inches above ground
|
||||
final float CAMERA_LEFT_DISPLACEMENT = 0.0f * mmPerInch; // eg: Enter the left distance from the center of the robot to the camera lens
|
||||
|
||||
OpenGLMatrix robotFromCamera = OpenGLMatrix
|
||||
.translation(CAMERA_FORWARD_DISPLACEMENT, CAMERA_LEFT_DISPLACEMENT, CAMERA_VERTICAL_DISPLACEMENT)
|
||||
.multiplied(Orientation.getRotationMatrix(EXTRINSIC, YZX, DEGREES, phoneYRotate, phoneZRotate, phoneXRotate));
|
||||
|
||||
/** Let all the trackable listeners know where the phone is. */
|
||||
for (VuforiaTrackable trackable : allTrackables) {
|
||||
((VuforiaTrackableDefaultListener) trackable.getListener()).setPhoneInformation(robotFromCamera, parameters.cameraDirection);
|
||||
}
|
||||
|
||||
/*
|
||||
* WARNING:
|
||||
* In this sample, we do not wait for PLAY to be pressed. Target Tracking is started immediately when INIT is pressed.
|
||||
* This sequence is used to enable the new remote DS Camera Stream feature to be used with this sample.
|
||||
* CONSEQUENTLY do not put any driving commands in this loop.
|
||||
* To restore the normal opmode structure, just un-comment the following line:
|
||||
*/
|
||||
|
||||
// waitForStart();
|
||||
|
||||
/* Note: To use the remote camera preview:
|
||||
* AFTER you hit Init on the Driver Station, use the "options menu" to select "Camera Stream"
|
||||
* Tap the preview window to receive a fresh image.
|
||||
* It is not permitted to transition to RUN while the camera preview window is active.
|
||||
* Either press STOP to exit the OpMode, or use the "options menu" again, and select "Camera Stream" to close the preview window.
|
||||
*/
|
||||
|
||||
targets.activate();
|
||||
while (!isStopRequested()) {
|
||||
|
||||
// check all the trackable targets to see which one (if any) is visible.
|
||||
targetVisible = false;
|
||||
for (VuforiaTrackable trackable : allTrackables) {
|
||||
if (((VuforiaTrackableDefaultListener)trackable.getListener()).isVisible()) {
|
||||
telemetry.addData("Visible Target", trackable.getName());
|
||||
targetVisible = true;
|
||||
|
||||
// getUpdatedRobotLocation() will return null if no new information is available since
|
||||
// the last time that call was made, or if the trackable is not currently visible.
|
||||
OpenGLMatrix robotLocationTransform = ((VuforiaTrackableDefaultListener)trackable.getListener()).getUpdatedRobotLocation();
|
||||
if (robotLocationTransform != null) {
|
||||
lastLocation = robotLocationTransform;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Provide feedback as to where the robot is located (if we know).
|
||||
if (targetVisible) {
|
||||
// express position (translation) of robot in inches.
|
||||
VectorF translation = lastLocation.getTranslation();
|
||||
telemetry.addData("Pos (inches)", "{X, Y, Z} = %.1f, %.1f, %.1f",
|
||||
translation.get(0) / mmPerInch, translation.get(1) / mmPerInch, translation.get(2) / mmPerInch);
|
||||
|
||||
// express the rotation of the robot in degrees.
|
||||
Orientation rotation = Orientation.getOrientation(lastLocation, EXTRINSIC, XYZ, DEGREES);
|
||||
telemetry.addData("Rot (deg)", "{Roll, Pitch, Heading} = %.0f, %.0f, %.0f", rotation.firstAngle, rotation.secondAngle, rotation.thirdAngle);
|
||||
}
|
||||
else {
|
||||
telemetry.addData("Visible Target", "none");
|
||||
}
|
||||
telemetry.update();
|
||||
}
|
||||
|
||||
// Disable Tracking when we are done;
|
||||
targets.deactivate();
|
||||
}
|
||||
|
||||
/***
|
||||
* Identify a target by naming it, and setting its position and orientation on the field
|
||||
* @param targetIndex
|
||||
* @param targetName
|
||||
* @param dx, dy, dz Target offsets in x,y,z axes
|
||||
* @param rx, ry, rz Target rotations in x,y,z axes
|
||||
*/
|
||||
void identifyTarget(int targetIndex, String targetName, float dx, float dy, float dz, float rx, float ry, float rz) {
|
||||
VuforiaTrackable aTarget = targets.get(targetIndex);
|
||||
aTarget.setName(targetName);
|
||||
aTarget.setLocation(OpenGLMatrix.translation(dx, dy, dz)
|
||||
.multiplied(Orientation.getRotationMatrix(EXTRINSIC, XYZ, DEGREES, rx, ry, rz)));
|
||||
}
|
||||
}
|
@ -1,274 +0,0 @@
|
||||
/* Copyright (c) 2019 FIRST. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted (subject to the limitations in the disclaimer below) provided that
|
||||
* the following conditions are met:
|
||||
*
|
||||
* Redistributions of source code must retain the above copyright notice, this list
|
||||
* of conditions and the following disclaimer.
|
||||
*
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
* list of conditions and the following disclaimer in the documentation and/or
|
||||
* other materials provided with the distribution.
|
||||
*
|
||||
* Neither the name of FIRST nor the names of its contributors may be used to endorse or
|
||||
* promote products derived from this software without specific prior written permission.
|
||||
*
|
||||
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS
|
||||
* LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package org.firstinspires.ftc.robotcontroller.external.samples;
|
||||
|
||||
import com.qualcomm.robotcore.eventloop.opmode.Disabled;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
|
||||
|
||||
import org.firstinspires.ftc.robotcore.external.ClassFactory;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.WebcamName;
|
||||
import org.firstinspires.ftc.robotcore.external.matrices.OpenGLMatrix;
|
||||
import org.firstinspires.ftc.robotcore.external.matrices.VectorF;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.Orientation;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaLocalizer;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaTrackable;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaTrackableDefaultListener;
|
||||
import org.firstinspires.ftc.robotcore.external.navigation.VuforiaTrackables;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import static org.firstinspires.ftc.robotcore.external.navigation.AngleUnit.DEGREES;
|
||||
import static org.firstinspires.ftc.robotcore.external.navigation.AxesOrder.XYZ;
|
||||
import static org.firstinspires.ftc.robotcore.external.navigation.AxesOrder.XZY;
|
||||
import static org.firstinspires.ftc.robotcore.external.navigation.AxesReference.EXTRINSIC;
|
||||
|
||||
/**
|
||||
* This OpMode illustrates using the Vuforia localizer to determine positioning and orientation of
|
||||
* robot on the FTC field using a WEBCAM. The code is structured as a LinearOpMode
|
||||
*
|
||||
* NOTE: If you are running on a Phone with a built-in camera, use the ConceptVuforiaFieldNavigation example instead of this one.
|
||||
* NOTE: It is possible to switch between multiple WebCams (eg: one for the left side and one for the right).
|
||||
* For a related example of how to do this, see ConceptTensorFlowObjectDetectionSwitchableCameras
|
||||
*
|
||||
* When images are located, Vuforia is able to determine the position and orientation of the
|
||||
* image relative to the camera. This sample code then combines that information with a
|
||||
* knowledge of where the target images are on the field, to determine the location of the camera.
|
||||
*
|
||||
* Finally, the location of the camera on the robot is used to determine the
|
||||
* robot's location and orientation on the field.
|
||||
*
|
||||
* To learn more about the FTC field coordinate model, see FTC_FieldCoordinateSystemDefinition.pdf in this folder
|
||||
*
|
||||
* Use Android Studio to Copy this Class, and Paste it into your team's code folder with a new name.
|
||||
* Remove or comment out the @Disabled line to add this OpMode to the Driver Station OpMode list.
|
||||
*
|
||||
* IMPORTANT: In order to use this OpMode, you need to obtain your own Vuforia license key as
|
||||
* is explained below.
|
||||
*/
|
||||
|
||||
@TeleOp(name="Vuforia Field Nav Webcam", group ="Concept")
@Disabled
public class ConceptVuforiaFieldNavigationWebcam extends LinearOpMode {

    /*
     * IMPORTANT: You need to obtain your own license key to use Vuforia. The string below with which
     * 'parameters.vuforiaLicenseKey' is initialized is for illustration only, and will not function.
     * A Vuforia 'Development' license key can be obtained free of charge from the Vuforia developer
     * web site at https://developer.vuforia.com/license-manager.
     *
     * Vuforia license keys are always 380 characters long, and look as if they contain mostly
     * random data. As an example, here is a fragment of a valid key:
     *      ... yIgIzTqZ4mWjk9wd3cZO9T1axEqzuhxoGlfOOI2dRzKS4T0hQ8kT ...
     * Once you've obtained a license key, copy the string from the Vuforia web site
     * and paste it in to your code on the next line, between the double quotes.
     */
    private static final String VUFORIA_KEY =
            " --- YOUR NEW VUFORIA KEY GOES HERE --- ";

    // Since ImageTarget trackables use mm to specify their dimensions, we must use mm for all the physical dimensions.
    // We will define some constants and conversions here.
    private static final float mmPerInch        = 25.4f;                  // millimeters per inch
    private static final float mmTargetHeight   = 6 * mmPerInch;          // the height of the center of the target image above the floor
    private static final float halfField        = 72 * mmPerInch;         // half the field width
    private static final float halfTile         = 12 * mmPerInch;         // half of one floor tile (not referenced in this sample)
    private static final float oneAndHalfTile   = 36 * mmPerInch;         // one and a half floor tiles

    // Class Members
    private OpenGLMatrix lastLocation   = null;   // most recent robot pose computed from a visible target
    private VuforiaLocalizer vuforia    = null;   // the Vuforia localization engine
    private VuforiaTrackables targets   = null;   // the image targets loaded from the "PowerPlay" asset
    private WebcamName webcamName       = null;   // the configured webcam used for tracking

    private boolean targetVisible       = false;  // true while at least one target is currently visible

    @Override public void runOpMode() {
        // Connect to the camera we are to use.  This name must match what is set up in Robot Configuration.
        webcamName = hardwareMap.get(WebcamName.class, "Webcam 1");

        /*
         * Configure Vuforia by creating a Parameter object, and passing it to the Vuforia engine.
         * We can pass Vuforia the handle to a camera preview resource (on the RC screen);
         * If no camera-preview is desired, use the parameter-less constructor instead (commented out below).
         * Note: A preview window is required if you want to view the camera stream on the Driver Station Phone.
         */
        int cameraMonitorViewId = hardwareMap.appContext.getResources().getIdentifier("cameraMonitorViewId", "id", hardwareMap.appContext.getPackageName());
        VuforiaLocalizer.Parameters parameters = new VuforiaLocalizer.Parameters(cameraMonitorViewId);
        // VuforiaLocalizer.Parameters parameters = new VuforiaLocalizer.Parameters();

        parameters.vuforiaLicenseKey = VUFORIA_KEY;

        // We also indicate which camera we wish to use.
        parameters.cameraName = webcamName;

        // Turn off Extended tracking.  Set this true if you want Vuforia to track beyond the target.
        parameters.useExtendedTracking = false;

        // Instantiate the Vuforia engine.
        vuforia = ClassFactory.getInstance().createVuforia(parameters);

        // Load the data sets for the trackable objects. These particular data
        // sets are stored in the 'assets' part of our application.
        targets = this.vuforia.loadTrackablesFromAsset("PowerPlay");

        // For convenience, gather together all the trackable objects in one easily-iterable collection.
        List<VuforiaTrackable> allTrackables = new ArrayList<VuforiaTrackable>();
        allTrackables.addAll(targets);

        /*
         * In order for localization to work, we need to tell the system where each target is on the field, and
         * where the camera resides on the robot.  These specifications are in the form of transformation matrices.
         * Transformation matrices are a central, important concept in the math here involved in localization.
         * See https://en.wikipedia.org/wiki/Transformation_matrix for detailed information.
         * Commonly, you'll encounter transformation matrices as instances of the OpenGLMatrix class.
         *
         * If you are standing in the Red Alliance Station looking towards the center of the field,
         *     - The X axis runs from your left to the right. (positive from the center to the right)
         *     - The Y axis runs from the Red Alliance Station towards the other side of the field
         *       where the Blue Alliance Station is. (Positive is from the center, towards the Blue Alliance station)
         *     - The Z axis runs from the floor, upwards towards the ceiling.  (Positive is above the floor)
         *
         * Before being transformed, each target image is conceptually located at the origin of the field's
         * coordinate system (the center of the field), facing up.
         */

        // Name and locate each trackable object.
        identifyTarget(0, "Red Audience Wall",   -halfField,  -oneAndHalfTile, mmTargetHeight, 90,  0,  90);
        identifyTarget(1, "Red Rear Wall",        halfField,  -oneAndHalfTile, mmTargetHeight, 90,  0, -90);
        identifyTarget(2, "Blue Audience Wall",  -halfField,   oneAndHalfTile, mmTargetHeight, 90,  0,  90);
        identifyTarget(3, "Blue Rear Wall",       halfField,   oneAndHalfTile, mmTargetHeight, 90,  0, -90);

        /*
         * Create a transformation matrix describing where the camera is on the robot.
         *
         * Info:  The coordinate frame for the robot looks the same as the field.
         * The robot's "forward" direction is facing out along X axis, with the LEFT side facing out along the Y axis.
         * Z is UP on the robot.  This equates to a bearing angle of Zero degrees.
         *
         * For a WebCam, the default starting orientation of the camera is looking UP (pointing in the Z direction),
         * with the wide (horizontal) axis of the camera aligned with the X axis, and
         * the narrow (vertical) axis of the camera aligned with the Y axis.
         *
         * But, this example assumes that the camera is actually facing forward out the front of the robot.
         * So, the "default" camera position requires two rotations to get it oriented correctly.
         * 1) First it must be rotated +90 degrees around the X axis to get it horizontal (it is now facing out the right side of the robot).
         * 2) Next it must be rotated +90 degrees (counter-clockwise) around the Z axis to face forward.
         *
         * Finally the camera can be translated to its actual mounting position on the robot.
         * In this example, it is centered on the robot (left-to-right and front-to-back), and 6 inches above ground level.
         */

        final float CAMERA_FORWARD_DISPLACEMENT  = 0.0f * mmPerInch;   // eg: Enter the forward distance from the center of the robot to the camera lens
        final float CAMERA_VERTICAL_DISPLACEMENT = 6.0f * mmPerInch;   // eg: Camera is 6 Inches above ground
        final float CAMERA_LEFT_DISPLACEMENT     = 0.0f * mmPerInch;   // eg: Enter the left distance from the center of the robot to the camera lens

        OpenGLMatrix cameraLocationOnRobot = OpenGLMatrix
                .translation(CAMERA_FORWARD_DISPLACEMENT, CAMERA_LEFT_DISPLACEMENT, CAMERA_VERTICAL_DISPLACEMENT)
                .multiplied(Orientation.getRotationMatrix(EXTRINSIC, XZY, DEGREES, 90, 90, 0));

        // Let all the trackable listeners know where the camera is.
        for (VuforiaTrackable trackable : allTrackables) {
            ((VuforiaTrackableDefaultListener) trackable.getListener()).setCameraLocationOnRobot(parameters.cameraName, cameraLocationOnRobot);
        }

        /*
         * WARNING:
         * In this sample, we do not wait for PLAY to be pressed.  Target Tracking is started immediately when INIT is pressed.
         * This sequence is used to enable the new remote DS Camera Preview feature to be used with this sample.
         * CONSEQUENTLY do not put any driving commands in this loop.
         * To restore the normal opmode structure, just un-comment the following line:
         */

        // waitForStart();

        /* Note: To use the remote camera preview:
         * AFTER you hit Init on the Driver Station, use the "options menu" to select "Camera Stream".
         * Tap the preview window to receive a fresh image.
         * It is not permitted to transition to RUN while the camera preview window is active.
         * Either press STOP to exit the OpMode, or use the "options menu" again, and select "Camera Stream" to close the preview window.
         */

        targets.activate();
        while (!isStopRequested()) {

            // Check all the trackable targets to see which one (if any) is visible.
            targetVisible = false;
            for (VuforiaTrackable trackable : allTrackables) {
                if (((VuforiaTrackableDefaultListener)trackable.getListener()).isVisible()) {
                    telemetry.addData("Visible Target", trackable.getName());
                    targetVisible = true;

                    // getUpdatedRobotLocation() will return null if no new information is available since
                    // the last time that call was made, or if the trackable is not currently visible.
                    OpenGLMatrix robotLocationTransform = ((VuforiaTrackableDefaultListener)trackable.getListener()).getUpdatedRobotLocation();
                    if (robotLocationTransform != null) {
                        lastLocation = robotLocationTransform;
                    }
                    break;  // only the first visible target is used each cycle
                }
            }

            // Provide feedback as to where the robot is located (if we know).
            if (targetVisible) {
                // Express the position (translation) of the robot in inches.
                VectorF translation = lastLocation.getTranslation();
                telemetry.addData("Pos (inches)", "{X, Y, Z} = %.1f, %.1f, %.1f",
                        translation.get(0) / mmPerInch, translation.get(1) / mmPerInch, translation.get(2) / mmPerInch);

                // Express the rotation of the robot in degrees.
                Orientation rotation = Orientation.getOrientation(lastLocation, EXTRINSIC, XYZ, DEGREES);
                telemetry.addData("Rot (deg)", "{Roll, Pitch, Heading} = %.0f, %.0f, %.0f", rotation.firstAngle, rotation.secondAngle, rotation.thirdAngle);
            }
            else {
                telemetry.addData("Visible Target", "none");
            }
            telemetry.update();
        }

        // Disable Tracking when we are done.
        targets.deactivate();
    }

    /**
     * Identify a target by naming it, and setting its position and orientation on the field.
     *
     * @param targetIndex index of the target within the loaded {@code targets} collection
     * @param targetName  human-readable name to assign to the target
     * @param dx          X offset of the target center on the field, in mm
     * @param dy          Y offset of the target center on the field, in mm
     * @param dz          Z offset (height) of the target center above the floor, in mm
     * @param rx          rotation of the target about the field X axis, in degrees
     * @param ry          rotation of the target about the field Y axis, in degrees
     * @param rz          rotation of the target about the field Z axis, in degrees
     */
    void identifyTarget(int targetIndex, String targetName, float dx, float dy, float dz, float rx, float ry, float rz) {
        VuforiaTrackable aTarget = targets.get(targetIndex);
        aTarget.setName(targetName);
        aTarget.setLocation(OpenGLMatrix.translation(dx, dy, dz)
                .multiplied(Orientation.getRotationMatrix(EXTRINSIC, XYZ, DEGREES, rx, ry, rz)));
    }
}
|
Binary file not shown.
@ -72,7 +72,7 @@ import org.firstinspires.ftc.robotcore.external.navigation.Orientation;
|
||||
* The angle of movement/rotation is assumed to be a standardized rotation around the robot Z axis,
|
||||
* which means that a Positive rotation is Counter Clockwise, looking down on the field.
|
||||
* This is consistent with the FTC field coordinate conventions set out in the document:
|
||||
* ftc_app\doc\tutorial\FTC_FieldCoordinateSystemDefinition.pdf
|
||||
* https://ftc-docs.firstinspires.org/field-coordinate-system
|
||||
*
|
||||
* Control Approach.
|
||||
*
|
||||
|
@ -0,0 +1,293 @@
|
||||
/* Copyright (c) 2023 FIRST. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted (subject to the limitations in the disclaimer below) provided that
|
||||
* the following conditions are met:
|
||||
*
|
||||
* Redistributions of source code must retain the above copyright notice, this list
|
||||
* of conditions and the following disclaimer.
|
||||
*
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
* list of conditions and the following disclaimer in the documentation and/or
|
||||
* other materials provided with the distribution.
|
||||
*
|
||||
* Neither the name of FIRST nor the names of its contributors may be used to endorse or
|
||||
* promote products derived from this software without specific prior written permission.
|
||||
*
|
||||
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS
|
||||
* LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package org.firstinspires.ftc.robotcontroller.external.samples;
|
||||
|
||||
import com.qualcomm.robotcore.eventloop.opmode.Disabled;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
|
||||
import com.qualcomm.robotcore.hardware.DcMotor;
|
||||
import com.qualcomm.robotcore.util.Range;
|
||||
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.BuiltinCameraDirection;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.WebcamName;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.controls.ExposureControl;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.controls.GainControl;
|
||||
import org.firstinspires.ftc.vision.VisionPortal;
|
||||
import org.firstinspires.ftc.vision.apriltag.AprilTagDetection;
|
||||
import org.firstinspires.ftc.vision.apriltag.AprilTagProcessor;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* This OpMode illustrates using a camera to locate and drive towards a specific AprilTag.
|
||||
* The code assumes a Holonomic (Mecanum or X Drive) Robot.
|
||||
*
|
||||
* The drive goal is to rotate to keep the Tag centered in the camera, while strafing to be directly in front of the tag, and
|
||||
* driving towards the tag to achieve the desired distance.
|
||||
* To reduce any motion blur (which will interrupt the detection process) the Camera exposure is reduced to a very low value (5mS)
|
||||
* You can determine the best Exposure and Gain values by using the ConceptAprilTagOptimizeExposure OpMode in this Samples folder.
|
||||
*
|
||||
* The code assumes a Robot Configuration with motors named: leftfront_drive and rightfront_drive, leftback_drive and rightback_drive.
|
||||
* The motor directions must be set so a positive power goes forward on all wheels.
|
||||
* This sample assumes that the current game AprilTag Library (usually for the current season) is being loaded by default,
|
||||
* so you should choose to approach a valid tag ID (usually starting at 0)
|
||||
*
|
||||
* Under manual control, the left stick will move forward/back & left/right. The right stick will rotate the robot.
|
||||
* Manually drive the robot until it displays Target data on the Driver Station.
|
||||
*
|
||||
* Press and hold the *Left Bumper* to enable the automatic "Drive to target" mode.
|
||||
* Release the Left Bumper to return to manual driving mode.
|
||||
*
|
||||
* Under "Drive To Target" mode, the robot has three goals:
|
||||
* 1) Turn the robot to always keep the Tag centered on the camera frame. (Use the Target Bearing to turn the robot.)
|
||||
* 2) Strafe the robot towards the centerline of the Tag, so it approaches directly in front of the tag. (Use the Target Yaw to strafe the robot)
|
||||
* 3) Drive towards the Tag to get to the desired distance. (Use Tag Range to drive the robot forward/backward)
|
||||
*
|
||||
* Use DESIRED_DISTANCE to set how close you want the robot to get to the target.
|
||||
* Speed and Turn sensitivity can be adjusted using the SPEED_GAIN, STRAFE_GAIN and TURN_GAIN constants.
|
||||
*
|
||||
* Use Android Studio to Copy this Class, and Paste it into the TeamCode/src/main/java/org/firstinspires/ftc/teamcode folder.
|
||||
* Remove or comment out the @Disabled line to add this OpMode to the Driver Station OpMode list.
|
||||
*
|
||||
*/
|
||||
|
||||
@TeleOp(name="Omni Drive To AprilTag", group = "Concept")
|
||||
@Disabled
|
||||
public class RobotAutoDriveToAprilTagOmni extends LinearOpMode
|
||||
{
|
||||
// Adjust these numbers to suit your robot.
|
||||
final double DESIRED_DISTANCE = 12.0; // this is how close the camera should get to the target (inches)
|
||||
|
||||
// Set the GAIN constants to control the relationship between the measured position error, and how much power is
|
||||
// applied to the drive motors to correct the error.
|
||||
// Drive = Error * Gain Make these values smaller for smoother control, or larger for a more aggressive response.
|
||||
final double SPEED_GAIN = 0.02 ; // Forward Speed Control "Gain". eg: Ramp up to 50% power at a 25 inch error. (0.50 / 25.0)
|
||||
final double STRAFE_GAIN = 0.015 ; // Strafe Speed Control "Gain". eg: Ramp up to 25% power at a 25 degree Yaw error. (0.25 / 25.0)
|
||||
final double TURN_GAIN = 0.01 ; // Turn Control "Gain". eg: Ramp up to 25% power at a 25 degree error. (0.25 / 25.0)
|
||||
|
||||
final double MAX_AUTO_SPEED = 0.5; // Clip the approach speed to this max value (adjust for your robot)
|
||||
final double MAX_AUTO_STRAFE= 0.5; // Clip the approach speed to this max value (adjust for your robot)
|
||||
final double MAX_AUTO_TURN = 0.3; // Clip the turn speed to this max value (adjust for your robot)
|
||||
|
||||
private DcMotor leftFrontDrive = null; // Used to control the left front drive wheel
|
||||
private DcMotor rightFrontDrive = null; // Used to control the right front drive wheel
|
||||
private DcMotor leftBackDrive = null; // Used to control the left back drive wheel
|
||||
private DcMotor rightBackDrive = null; // Used to control the right back drive wheel
|
||||
|
||||
private static final boolean USE_WEBCAM = true; // Set true to use a webcam, or false for a phone camera
|
||||
private static final int DESIRED_TAG_ID = 0; // Choose the tag you want to approach or set to -1 for ANY tag.
|
||||
private VisionPortal visionPortal; // Used to manage the video source.
|
||||
private AprilTagProcessor aprilTag; // Used for managing the AprilTag detection process.
|
||||
private AprilTagDetection desiredTag = null; // Used to hold the data for a detected AprilTag
|
||||
|
||||
@Override public void runOpMode()
|
||||
{
|
||||
boolean targetFound = false; // Set to true when an AprilTag target is detected
|
||||
double drive = 0; // Desired forward power/speed (-1 to +1)
|
||||
double strafe = 0; // Desired strafe power/speed (-1 to +1)
|
||||
double turn = 0; // Desired turning power/speed (-1 to +1)
|
||||
|
||||
// Initialize the Apriltag Detection process
|
||||
initAprilTag();
|
||||
|
||||
// Initialize the hardware variables. Note that the strings used here as parameters
|
||||
// to 'get' must match the names assigned during the robot configuration.
|
||||
// step (using the FTC Robot Controller app on the phone).
|
||||
leftFrontDrive = hardwareMap.get(DcMotor.class, "leftfront_drive");
|
||||
rightFrontDrive = hardwareMap.get(DcMotor.class, "rightfront_drive");
|
||||
leftBackDrive = hardwareMap.get(DcMotor.class, "leftback_drive");
|
||||
rightBackDrive = hardwareMap.get(DcMotor.class, "rightback_drive");
|
||||
|
||||
// To drive forward, most robots need the motor on one side to be reversed, because the axles point in opposite directions.
|
||||
// When run, this OpMode should start both motors driving forward. So adjust these two lines based on your first test drive.
|
||||
// Note: The settings here assume direct drive on left and right wheels. Gear Reduction or 90 Deg drives may require direction flips
|
||||
leftFrontDrive.setDirection(DcMotor.Direction.REVERSE);
|
||||
leftBackDrive.setDirection(DcMotor.Direction.REVERSE);
|
||||
rightFrontDrive.setDirection(DcMotor.Direction.FORWARD);
|
||||
rightBackDrive.setDirection(DcMotor.Direction.FORWARD);
|
||||
|
||||
if (USE_WEBCAM)
|
||||
setManualExposure(6, 250); // Use low exposure time to reduce motion blur
|
||||
|
||||
// Wait for driver to press start
|
||||
telemetry.addData("Camera preview on/off", "3 dots, Camera Stream");
|
||||
telemetry.addData(">", "Touch Play to start OpMode");
|
||||
telemetry.update();
|
||||
waitForStart();
|
||||
|
||||
while (opModeIsActive())
|
||||
{
|
||||
targetFound = false;
|
||||
desiredTag = null;
|
||||
|
||||
// Step through the list of detected tags and look for a matching tag
|
||||
List<AprilTagDetection> currentDetections = aprilTag.getDetections();
|
||||
for (AprilTagDetection detection : currentDetections) {
|
||||
if ((detection.metadata != null)
|
||||
&& ((DESIRED_TAG_ID >= 0) || (detection.id == DESIRED_TAG_ID)) ){
|
||||
targetFound = true;
|
||||
desiredTag = detection;
|
||||
break; // don't look any further.
|
||||
}
|
||||
}
|
||||
|
||||
// Tell the driver what we see, and what to do.
|
||||
if (targetFound) {
|
||||
telemetry.addData(">","HOLD Left-Bumper to Drive to Target\n");
|
||||
telemetry.addData("Target", "ID %d (%s)", desiredTag.id, desiredTag.metadata.name);
|
||||
telemetry.addData("Range", "%5.1f inches", desiredTag.ftcPose.range);
|
||||
telemetry.addData("Bearing","%3.0f degrees", desiredTag.ftcPose.bearing);
|
||||
telemetry.addData("Yaw","%3.0f degrees", desiredTag.ftcPose.yaw);
|
||||
} else {
|
||||
telemetry.addData(">","Drive using joystick to find target\n");
|
||||
}
|
||||
|
||||
// If Left Bumper is being pressed, AND we have found the desired target, Drive to target Automatically .
|
||||
if (gamepad1.left_bumper && targetFound) {
|
||||
|
||||
// Determine heading, range and Yaw (tag image rotation) error so we can use them to control the robot automatically.
|
||||
double rangeError = (desiredTag.ftcPose.range - DESIRED_DISTANCE);
|
||||
double headingError = desiredTag.ftcPose.bearing;
|
||||
double yawError = desiredTag.ftcPose.yaw;
|
||||
|
||||
// Use the speed and turn "gains" to calculate how we want the robot to move.
|
||||
drive = Range.clip(rangeError * SPEED_GAIN, -MAX_AUTO_SPEED, MAX_AUTO_SPEED);
|
||||
turn = Range.clip(headingError * TURN_GAIN, -MAX_AUTO_TURN, MAX_AUTO_TURN) ;
|
||||
strafe = Range.clip(-yawError * STRAFE_GAIN, -MAX_AUTO_STRAFE, MAX_AUTO_STRAFE);
|
||||
|
||||
telemetry.addData("Auto","Drive %5.2f, Strafe %5.2f, Turn %5.2f ", drive, strafe, turn);
|
||||
} else {
|
||||
|
||||
// drive using manual POV Joystick mode. Slow things down to make the robot more controlable.
|
||||
drive = -gamepad1.left_stick_y / 2.0; // Reduce drive rate to 50%.
|
||||
strafe = -gamepad1.left_stick_x / 2.0; // Reduce strafe rate to 50%.
|
||||
turn = -gamepad1.right_stick_x / 3.0; // Reduce turn rate to 33%.
|
||||
telemetry.addData("Manual","Drive %5.2f, Strafe %5.2f, Turn %5.2f ", drive, strafe, turn);
|
||||
}
|
||||
telemetry.update();
|
||||
|
||||
// Apply desired axes motions to the drivetrain.
|
||||
moveRobot(drive, strafe, turn);
|
||||
sleep(10);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Move robot according to desired axes motions
|
||||
* Positive X is forward
|
||||
* Positive Y is strafe left
|
||||
* Positive Yaw is counter-clockwise
|
||||
*/
|
||||
public void moveRobot(double x, double y, double yaw) {
|
||||
// Calculate wheel powers.
|
||||
double leftFrontPower = x -y -yaw;
|
||||
double rightFrontPower = x +y +yaw;
|
||||
double leftBackPower = x +y -yaw;
|
||||
double rightBackPower = x -y +yaw;
|
||||
|
||||
// Normalize wheel powers to be less than 1.0
|
||||
double max = Math.max(Math.abs(leftFrontPower), Math.abs(rightFrontPower));
|
||||
max = Math.max(max, Math.abs(leftBackPower));
|
||||
max = Math.max(max, Math.abs(rightBackPower));
|
||||
|
||||
if (max > 1.0) {
|
||||
leftFrontPower /= max;
|
||||
rightFrontPower /= max;
|
||||
leftBackPower /= max;
|
||||
rightBackPower /= max;
|
||||
}
|
||||
|
||||
// Send powers to the wheels.
|
||||
leftFrontDrive.setPower(leftFrontPower);
|
||||
rightFrontDrive.setPower(rightFrontPower);
|
||||
leftBackDrive.setPower(leftBackPower);
|
||||
rightBackDrive.setPower(rightBackPower);
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the AprilTag processor.
|
||||
*/
|
||||
private void initAprilTag() {
|
||||
// Create the AprilTag processor by using a builder.
|
||||
aprilTag = new AprilTagProcessor.Builder().build();
|
||||
|
||||
// Create the vision portal by using a builder.
|
||||
if (USE_WEBCAM) {
|
||||
visionPortal = new VisionPortal.Builder()
|
||||
.setCamera(hardwareMap.get(WebcamName.class, "Webcam 1"))
|
||||
.addProcessor(aprilTag)
|
||||
.build();
|
||||
} else {
|
||||
visionPortal = new VisionPortal.Builder()
|
||||
.setCamera(BuiltinCameraDirection.BACK)
|
||||
.addProcessor(aprilTag)
|
||||
.build();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Manually set the camera gain and exposure.
|
||||
This can only be called AFTER calling initAprilTag(), and only works for Webcams;
|
||||
*/
|
||||
private void setManualExposure(int exposureMS, int gain) {
|
||||
// Wait for the camera to be open, then use the controls
|
||||
|
||||
if (visionPortal == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Make sure camera is streaming before we try to set the exposure controls
|
||||
if (visionPortal.getCameraState() != VisionPortal.CameraState.STREAMING) {
|
||||
telemetry.addData("Camera", "Waiting");
|
||||
telemetry.update();
|
||||
while (!isStopRequested() && (visionPortal.getCameraState() != VisionPortal.CameraState.STREAMING)) {
|
||||
sleep(20);
|
||||
}
|
||||
telemetry.addData("Camera", "Ready");
|
||||
telemetry.update();
|
||||
}
|
||||
|
||||
// Set camera controls unless we are stopping.
|
||||
if (!isStopRequested())
|
||||
{
|
||||
ExposureControl exposureControl = visionPortal.getCameraControl(ExposureControl.class);
|
||||
if (exposureControl.getMode() != ExposureControl.Mode.Manual) {
|
||||
exposureControl.setMode(ExposureControl.Mode.Manual);
|
||||
sleep(50);
|
||||
}
|
||||
exposureControl.setExposure((long)exposureMS, TimeUnit.MILLISECONDS);
|
||||
sleep(20);
|
||||
GainControl gainControl = visionPortal.getCameraControl(GainControl.class);
|
||||
gainControl.setGain(gain);
|
||||
sleep(20);
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,271 @@
|
||||
/* Copyright (c) 2023 FIRST. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted (subject to the limitations in the disclaimer below) provided that
|
||||
* the following conditions are met:
|
||||
*
|
||||
* Redistributions of source code must retain the above copyright notice, this list
|
||||
* of conditions and the following disclaimer.
|
||||
*
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
* list of conditions and the following disclaimer in the documentation and/or
|
||||
* other materials provided with the distribution.
|
||||
*
|
||||
* Neither the name of FIRST nor the names of its contributors may be used to endorse or
|
||||
* promote products derived from this software without specific prior written permission.
|
||||
*
|
||||
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS
|
||||
* LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package org.firstinspires.ftc.robotcontroller.external.samples;
|
||||
|
||||
import com.qualcomm.robotcore.eventloop.opmode.Disabled;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
|
||||
import com.qualcomm.robotcore.hardware.DcMotor;
|
||||
import com.qualcomm.robotcore.util.Range;
|
||||
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.BuiltinCameraDirection;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.WebcamName;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.controls.ExposureControl;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.controls.GainControl;
|
||||
import org.firstinspires.ftc.vision.VisionPortal;
|
||||
import org.firstinspires.ftc.vision.apriltag.AprilTagDetection;
|
||||
import org.firstinspires.ftc.vision.apriltag.AprilTagProcessor;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* This OpMode illustrates using a camera to locate and drive towards a specific AprilTag.
|
||||
* The code assumes a basic two-wheel (Tank) Robot Drivetrain
|
||||
*
|
||||
* The driving goal is to rotate to keep the tag centered in the camera, while driving towards the tag to achieve the desired distance.
|
||||
* To reduce any motion blur (which will interrupt the detection process) the Camera exposure is reduced to a very low value (5mS)
|
||||
* You can determine the best exposure and gain values by using the ConceptAprilTagOptimizeExposure OpMode in this Samples folder.
|
||||
*
|
||||
* The code assumes a Robot Configuration with motors named left_drive and right_drive.
|
||||
* The motor directions must be set so a positive power goes forward on both wheels;
|
||||
* This sample assumes that the default AprilTag Library (usually for the current season) is being loaded by default
|
||||
* so you should choose to approach a valid tag ID (usually starting at 0)
|
||||
*
|
||||
* Under manual control, the left stick will move forward/back, and the right stick will rotate the robot.
|
||||
* This is called POV Joystick mode, different than Tank Drive (where each joystick controls a wheel).
|
||||
*
|
||||
* Manually drive the robot until it displays Target data on the Driver Station.
|
||||
* Press and hold the *Left Bumper* to enable the automatic "Drive to target" mode.
|
||||
* Release the Left Bumper to return to manual driving mode.
|
||||
*
|
||||
* Under "Drive To Target" mode, the robot has two goals:
|
||||
* 1) Turn the robot to always keep the Tag centered on the camera frame. (Use the Target Bearing to turn the robot.)
|
||||
* 2) Drive towards the Tag to get to the desired distance. (Use Tag Range to drive the robot forward/backward)
|
||||
*
|
||||
* Use DESIRED_DISTANCE to set how close you want the robot to get to the target.
|
||||
* Speed and Turn sensitivity can be adjusted using the SPEED_GAIN and TURN_GAIN constants.
|
||||
*
|
||||
* Use Android Studio to Copy this Class, and Paste it into the TeamCode/src/main/java/org/firstinspires/ftc/teamcode folder.
|
||||
* Remove or comment out the @Disabled line to add this OpMode to the Driver Station OpMode list.
|
||||
*
|
||||
*/
|
||||
|
||||
@TeleOp(name="Tank Drive To AprilTag", group = "Concept")
|
||||
@Disabled
|
||||
public class RobotAutoDriveToAprilTagTank extends LinearOpMode
|
||||
{
|
||||
// Adjust these numbers to suit your robot.
|
||||
final double DESIRED_DISTANCE = 12.0; // this is how close the camera should get to the target (inches)
|
||||
|
||||
// Set the GAIN constants to control the relationship between the measured position error, and how much power is
|
||||
// applied to the drive motors to correct the error.
|
||||
// Drive = Error * Gain Make these values smaller for smoother control, or larger for a more aggressive response.
|
||||
final double SPEED_GAIN = 0.02 ; // Speed Control "Gain". eg: Ramp up to 50% power at a 25 inch error. (0.50 / 25.0)
|
||||
final double TURN_GAIN = 0.01 ; // Turn Control "Gain". eg: Ramp up to 25% power at a 25 degree error. (0.25 / 25.0)
|
||||
|
||||
final double MAX_AUTO_SPEED = 0.5; // Clip the approach speed to this max value (adjust for your robot)
|
||||
final double MAX_AUTO_TURN = 0.25; // Clip the turn speed to this max value (adjust for your robot)
|
||||
|
||||
private DcMotor leftDrive = null; // Used to control the left drive wheel
|
||||
private DcMotor rightDrive = null; // Used to control the right drive wheel
|
||||
|
||||
private static final boolean USE_WEBCAM = true; // Set true to use a webcam, or false for a phone camera
|
||||
private static final int DESIRED_TAG_ID = 0; // Choose the tag you want to approach or set to -1 for ANY tag.
|
||||
private VisionPortal visionPortal; // Used to manage the video source.
|
||||
private AprilTagProcessor aprilTag; // Used for managing the AprilTag detection process.
|
||||
private AprilTagDetection desiredTag = null; // Used to hold the data for a detected AprilTag
|
||||
|
||||
@Override public void runOpMode()
|
||||
{
|
||||
boolean targetFound = false; // Set to true when an AprilTag target is detected
|
||||
double drive = 0; // Desired forward power/speed (-1 to +1) +ve is forward
|
||||
double turn = 0; // Desired turning power/speed (-1 to +1) +ve is CounterClockwise
|
||||
|
||||
// Initialize the Apriltag Detection process
|
||||
initAprilTag();
|
||||
|
||||
// Initialize the hardware variables. Note that the strings used here as parameters
|
||||
// to 'get' must match the names assigned during the robot configuration.
|
||||
// step (using the FTC Robot Controller app on the phone).
|
||||
leftDrive = hardwareMap.get(DcMotor.class, "left_drive");
|
||||
rightDrive = hardwareMap.get(DcMotor.class, "right_drive");
|
||||
|
||||
// To drive forward, most robots need the motor on one side to be reversed because the axles point in opposite directions.
|
||||
// When run, this OpMode should start both motors driving forward. So adjust these two lines based on your first test drive.
|
||||
// Note: The settings here assume direct drive on left and right wheels. Single Gear Reduction or 90 Deg drives may require direction flips
|
||||
leftDrive.setDirection(DcMotor.Direction.REVERSE);
|
||||
rightDrive.setDirection(DcMotor.Direction.FORWARD);
|
||||
|
||||
if (USE_WEBCAM)
|
||||
setManualExposure(6, 250); // Use low exposure time to reduce motion blur
|
||||
|
||||
// Wait for the driver to press Start
|
||||
telemetry.addData("Camera preview on/off", "3 dots, Camera Stream");
|
||||
telemetry.addData(">", "Touch Play to start OpMode");
|
||||
telemetry.update();
|
||||
waitForStart();
|
||||
|
||||
while (opModeIsActive())
|
||||
{
|
||||
targetFound = false;
|
||||
desiredTag = null;
|
||||
|
||||
// Step through the list of detected tags and look for a matching tag
|
||||
List<AprilTagDetection> currentDetections = aprilTag.getDetections();
|
||||
for (AprilTagDetection detection : currentDetections) {
|
||||
if ((detection.metadata != null)
|
||||
&& ((DESIRED_TAG_ID >= 0) || (detection.id == DESIRED_TAG_ID)) ){
|
||||
targetFound = true;
|
||||
desiredTag = detection;
|
||||
break; // don't look any further.
|
||||
}
|
||||
}
|
||||
|
||||
// Tell the driver what we see, and what to do.
|
||||
if (targetFound) {
|
||||
telemetry.addData(">","HOLD Left-Bumper to Drive to Target\n");
|
||||
telemetry.addData("Target", "ID %d (%s)", desiredTag.id, desiredTag.metadata.name);
|
||||
telemetry.addData("Range", "%5.1f inches", desiredTag.ftcPose.range);
|
||||
telemetry.addData("Bearing","%3.0f degrees", desiredTag.ftcPose.bearing);
|
||||
} else {
|
||||
telemetry.addData(">","Drive using joystick to find target\n");
|
||||
}
|
||||
|
||||
// If Left Bumper is being pressed, AND we have found the desired target, Drive to target Automatically .
|
||||
if (gamepad1.left_bumper && targetFound) {
|
||||
|
||||
// Determine heading and range error so we can use them to control the robot automatically.
|
||||
double rangeError = (desiredTag.ftcPose.range - DESIRED_DISTANCE);
|
||||
double headingError = desiredTag.ftcPose.bearing;
|
||||
|
||||
// Use the speed and turn "gains" to calculate how we want the robot to move. Clip it to the maximum
|
||||
drive = Range.clip(rangeError * SPEED_GAIN, -MAX_AUTO_SPEED, MAX_AUTO_SPEED);
|
||||
turn = Range.clip(headingError * TURN_GAIN, -MAX_AUTO_TURN, MAX_AUTO_TURN) ;
|
||||
|
||||
telemetry.addData("Auto","Drive %5.2f, Turn %5.2f", drive, turn);
|
||||
} else {
|
||||
|
||||
// drive using manual POV Joystick mode.
|
||||
drive = -gamepad1.left_stick_y / 2.0; // Reduce drive rate to 50%.
|
||||
turn = -gamepad1.right_stick_x / 4.0; // Reduce turn rate to 25%.
|
||||
telemetry.addData("Manual","Drive %5.2f, Turn %5.2f", drive, turn);
|
||||
}
|
||||
telemetry.update();
|
||||
|
||||
// Apply desired axes motions to the drivetrain.
|
||||
moveRobot(drive, turn);
|
||||
sleep(10);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Move robot according to desired axes motions
|
||||
* Positive X is forward
|
||||
* Positive Yaw is counter-clockwise
|
||||
*/
|
||||
public void moveRobot(double x, double yaw) {
|
||||
// Calculate left and right wheel powers.
|
||||
double leftPower = x - yaw;
|
||||
double rightPower = x + yaw;
|
||||
|
||||
// Normalize wheel powers to be less than 1.0
|
||||
double max = Math.max(Math.abs(leftPower), Math.abs(rightPower));
|
||||
if (max >1.0) {
|
||||
leftPower /= max;
|
||||
rightPower /= max;
|
||||
}
|
||||
|
||||
// Send powers to the wheels.
|
||||
leftDrive.setPower(leftPower);
|
||||
rightDrive.setPower(rightPower);
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the AprilTag processor.
|
||||
*/
|
||||
private void initAprilTag() {
|
||||
// Create the AprilTag processor by using a builder.
|
||||
aprilTag = new AprilTagProcessor.Builder().build();
|
||||
|
||||
// Create the vision portal by using a builder.
|
||||
if (USE_WEBCAM) {
|
||||
visionPortal = new VisionPortal.Builder()
|
||||
.setCamera(hardwareMap.get(WebcamName.class, "Webcam 1"))
|
||||
.addProcessor(aprilTag)
|
||||
.build();
|
||||
} else {
|
||||
visionPortal = new VisionPortal.Builder()
|
||||
.setCamera(BuiltinCameraDirection.BACK)
|
||||
.addProcessor(aprilTag)
|
||||
.build();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Manually set the camera gain and exposure.
|
||||
This can only be called AFTER calling initAprilTag(), and only works for Webcams;
|
||||
*/
|
||||
private void setManualExposure(int exposureMS, int gain) {
|
||||
// Wait for the camera to be open, then use the controls
|
||||
|
||||
if (visionPortal == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Make sure camera is streaming before we try to set the exposure controls
|
||||
if (visionPortal.getCameraState() != VisionPortal.CameraState.STREAMING) {
|
||||
telemetry.addData("Camera", "Waiting");
|
||||
telemetry.update();
|
||||
while (!isStopRequested() && (visionPortal.getCameraState() != VisionPortal.CameraState.STREAMING)) {
|
||||
sleep(20);
|
||||
}
|
||||
telemetry.addData("Camera", "Ready");
|
||||
telemetry.update();
|
||||
}
|
||||
|
||||
// Set camera controls unless we are stopping.
|
||||
if (!isStopRequested())
|
||||
{
|
||||
ExposureControl exposureControl = visionPortal.getCameraControl(ExposureControl.class);
|
||||
if (exposureControl.getMode() != ExposureControl.Mode.Manual) {
|
||||
exposureControl.setMode(ExposureControl.Mode.Manual);
|
||||
sleep(50);
|
||||
}
|
||||
exposureControl.setExposure((long)exposureMS, TimeUnit.MILLISECONDS);
|
||||
sleep(20);
|
||||
GainControl gainControl = visionPortal.getCameraControl(GainControl.class);
|
||||
gainControl.setGain(gain);
|
||||
sleep(20);
|
||||
telemetry.addData("Camera", "Ready");
|
||||
telemetry.update();
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,129 @@
|
||||
/*
|
||||
* Copyright (c) 2023 FIRST
|
||||
*
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted (subject to the limitations in the disclaimer below) provided that
|
||||
* the following conditions are met:
|
||||
*
|
||||
* Redistributions of source code must retain the above copyright notice, this list
|
||||
* of conditions and the following disclaimer.
|
||||
*
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
* list of conditions and the following disclaimer in the documentation and/or
|
||||
* other materials provided with the distribution.
|
||||
*
|
||||
* Neither the name of FIRST nor the names of its contributors may be used to
|
||||
* endorse or promote products derived from this software without specific prior
|
||||
* written permission.
|
||||
*
|
||||
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS
|
||||
* LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
|
||||
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package org.firstinspires.ftc.robotcontroller.external.samples;
|
||||
|
||||
import android.util.Size;
|
||||
|
||||
import com.qualcomm.robotcore.eventloop.opmode.Disabled;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
|
||||
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
|
||||
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.BuiltinCameraDirection;
|
||||
import org.firstinspires.ftc.robotcore.external.hardware.camera.WebcamName;
|
||||
import org.firstinspires.ftc.vision.VisionPortal;
|
||||
|
||||
import java.util.Locale;
|
||||
|
||||
/**
|
||||
* This Utility OpMode helps calibrate a webcam or RC phone camera, useful for AprilTag pose estimation
|
||||
* with the FTC VisionPortal. It captures a camera frame (image) and stores it on the Robot Controller
|
||||
* (Control Hub or RC phone), with each press of the gamepad button X (or Square).
|
||||
* Full calibration instructions are here:
|
||||
*
|
||||
* https://ftc-docs.firstinspires.org/camera-calibration
|
||||
*
|
||||
* In Android Studio, copy this class into your "teamcode" folder with a new name.
|
||||
* Remove or comment out the @Disabled line to add this file to the Driver Station OpMode list.
|
||||
*
|
||||
* In OnBot Java, use "Add File" to add this file from the list of Samples.
|
||||
*/
|
||||
|
||||
@TeleOp(name = "Utility: Camera Frame Capture", group = "Utility")
|
||||
@Disabled
|
||||
public class UtilityCameraFrameCapture extends LinearOpMode
|
||||
{
|
||||
/*
|
||||
* EDIT THESE PARAMETERS AS NEEDED
|
||||
*/
|
||||
final boolean USING_WEBCAM = false;
|
||||
final BuiltinCameraDirection INTERNAL_CAM_DIR = BuiltinCameraDirection.BACK;
|
||||
final int RESOLUTION_WIDTH = 640;
|
||||
final int RESOLUTION_HEIGHT = 480;
|
||||
|
||||
// Internal state
|
||||
boolean lastX;
|
||||
int frameCount;
|
||||
long capReqTime;
|
||||
|
||||
@Override
|
||||
public void runOpMode()
|
||||
{
|
||||
VisionPortal portal;
|
||||
|
||||
if (USING_WEBCAM)
|
||||
{
|
||||
portal = new VisionPortal.Builder()
|
||||
.setCamera(hardwareMap.get(WebcamName.class, "Webcam 1"))
|
||||
.setCameraResolution(new Size(RESOLUTION_WIDTH, RESOLUTION_HEIGHT))
|
||||
.build();
|
||||
}
|
||||
else
|
||||
{
|
||||
portal = new VisionPortal.Builder()
|
||||
.setCamera(INTERNAL_CAM_DIR)
|
||||
.setCameraResolution(new Size(RESOLUTION_WIDTH, RESOLUTION_HEIGHT))
|
||||
.build();
|
||||
}
|
||||
|
||||
while (!isStopRequested())
|
||||
{
|
||||
boolean x = gamepad1.x;
|
||||
|
||||
if (x && !lastX)
|
||||
{
|
||||
portal.saveNextFrameRaw(String.format(Locale.US, "CameraFrameCapture-%06d", frameCount++));
|
||||
capReqTime = System.currentTimeMillis();
|
||||
}
|
||||
|
||||
lastX = x;
|
||||
|
||||
telemetry.addLine("######## Camera Capture Utility ########");
|
||||
telemetry.addLine(String.format(Locale.US, " > Resolution: %dx%d", RESOLUTION_WIDTH, RESOLUTION_HEIGHT));
|
||||
telemetry.addLine(" > Press X (or Square) to capture a frame");
|
||||
telemetry.addData(" > Camera Status", portal.getCameraState());
|
||||
|
||||
if (capReqTime != 0)
|
||||
{
|
||||
telemetry.addLine("\nCaptured Frame!");
|
||||
}
|
||||
|
||||
if (capReqTime != 0 && System.currentTimeMillis() - capReqTime > 1000)
|
||||
{
|
||||
capReqTime = 0;
|
||||
}
|
||||
|
||||
telemetry.update();
|
||||
}
|
||||
}
|
||||
}
|
@ -625,8 +625,8 @@ public class FtcRobotControllerActivity extends Activity
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the orientation of monitorContainer (which contains cameraMonitorView and
|
||||
* tfodMonitorView) based on the given configuration. Makes the children split the space.
|
||||
* Updates the orientation of monitorContainer (which contains cameraMonitorView)
|
||||
* based on the given configuration. Makes the children split the space.
|
||||
*/
|
||||
private void updateMonitorLayout(Configuration configuration) {
|
||||
LinearLayout monitorContainer = (LinearLayout) findViewById(R.id.monitorContainer);
|
||||
|
@ -140,13 +140,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
android:layout_weight="1"
|
||||
android:orientation="vertical"
|
||||
/>
|
||||
<FrameLayout
|
||||
android:id="@+id/tfodMonitorViewId"
|
||||
android:visibility="gone"
|
||||
android:layout_width="match_parent"
|
||||
android:layout_height="0dp"
|
||||
android:layout_weight="1"
|
||||
/>
|
||||
|
||||
</LinearLayout>
|
||||
|
||||
|
Reference in New Issue
Block a user